Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Tue, 22 Jul 2014 07:44:59 +0000 (00:44 -0700)
committerDavid S. Miller <davem@davemloft.net>
Tue, 22 Jul 2014 07:44:59 +0000 (00:44 -0700)
Conflicts:
drivers/infiniband/hw/cxgb4/device.c

The cxgb4 conflict was simply overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
831 files changed:
Documentation/ABI/testing/sysfs-class-net
Documentation/devicetree/bindings/net/broadcom-systemport.txt
Documentation/devicetree/bindings/net/davinci-mdio.txt
Documentation/devicetree/bindings/net/ieee802154/cc2520.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/marvell-pp2.txt [new file with mode: 0644]
Documentation/networking/bonding.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/pktgen.txt
Documentation/networking/timestamping.txt
MAINTAINERS
drivers/bcma/Makefile
drivers/bcma/driver_gpio.c
drivers/bcma/driver_pcie2.c [new file with mode: 0644]
drivers/bcma/main.c
drivers/bluetooth/Kconfig
drivers/bluetooth/ath3k.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btmrvl_sdio.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_vhci.c
drivers/firewire/net.c
drivers/hsi/clients/ssi_protocol.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/isdn/i4l/isdn_net.c
drivers/media/dvb-core/dvb_net.c
drivers/misc/sgi-xp/xpnet.c
drivers/net/Kconfig
drivers/net/arcnet/arcnet.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/caif/caif_virtio.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/slcan.c
drivers/net/cris/eth_v10.c
drivers/net/dummy.c
drivers/net/eql.c
drivers/net/ethernet/8390/lib8390.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2_fw.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic.h
drivers/net/ethernet/broadcom/cnic_defs.h
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/genet/Makefile
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/cisco/enic/Makefile
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_api.c
drivers/net/ethernet/cisco/enic/enic_clsf.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_clsf.h [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/enic_res.c
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/cisco/enic/vnic_dev.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_enet.h
drivers/net/ethernet/cisco/enic/vnic_rq.h
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_hmc.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/Makefile
drivers/net/ethernet/marvell/mvpp2.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpmac.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/ti/tlan.h
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/defxx.c
drivers/net/fddi/defxx.h
drivers/net/hamradio/6pack.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/dmascc.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig
drivers/net/ieee802154/Makefile
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c [new file with mode: 0644]
drivers/net/ieee802154/fakehard.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/irda/kingsun-sir.c
drivers/net/loopback.c
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/slip/slhc.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/vxlan.c
drivers/net/wan/dlci.c
drivers/net/wan/hdlc.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/lapbether.c
drivers/net/wan/sbni.c
drivers/net/wan/sdla.c
drivers/net/wan/x25_asy.c
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/airo.c
drivers/net/wireless/at76c50x-usb.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar953x_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/channel.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common-beacon.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/tx99.c
drivers/net/wireless/ath/ath9k/wow.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/phy.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/Makefile
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/phy_a.c
drivers/net/wireless/b43/phy_a.h
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/phy_common.h
drivers/net/wireless/b43/phy_ht.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/radio_2057.c
drivers/net/wireless/b43/radio_2057.h
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/b43/tables_nphy.h
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/fwil.c
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/vendor.c [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/vendor.h [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/cw1200/fwio.c
drivers/net/wireless/cw1200/scan.c
drivers/net/wireless/cw1200/scan.h
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/ipw2x00/libipw_module.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/Kconfig
drivers/net/wireless/libertas/cmd.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas/mesh.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/11ac.c
drivers/net/wireless/mwifiex/11ac.h
drivers/net/wireless/mwifiex/11h.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_aggr.h
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfg80211.h
drivers/net/wireless/mwifiex/cfp.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/ethtool.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/usb.h
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/util.h
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/Kconfig
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/rsi/rsi_91x_core.c
drivers/net/wireless/rsi/rsi_91x_debugfs.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/rsi/rsi_91x_mgmt.c
drivers/net/wireless/rsi/rsi_91x_pkt.c
drivers/net/wireless/rsi/rsi_91x_sdio.c
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/net/wireless/rsi/rsi_main.h
drivers/net/wireless/rsi/rsi_mgmt.h
drivers/net/wireless/rsi/rsi_sdio.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00mmio.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c
drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/reg.h
drivers/net/wireless/rtlwifi/rtl8192se/fw.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
drivers/net/wireless/rtlwifi/rtl8723be/reg.h
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl12xx/scan.c
drivers/net/wireless/ti/wl12xx/scan.h
drivers/net/wireless/ti/wl18xx/scan.c
drivers/net/wireless/ti/wl18xx/scan.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/scan.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/ptp/ptp_chardev.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/staging/cxt1e1/linux.c
drivers/staging/gdm724x/gdm_lte.c
drivers/staging/gdm72xx/gdm_wimax.c
drivers/staging/vt6655/wpactl.c
drivers/staging/wlan-ng/p80211netdev.c
drivers/tty/n_gsm.c
drivers/usb/gadget/f_phonet.c
include/linux/arcdevice.h
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_pcie2.h [new file with mode: 0644]
include/linux/crc32.h
include/linux/filter.h
include/linux/ieee80211.h
include/linux/if_bridge.h
include/linux/ipv6.h
include/linux/kernel.h
include/linux/mlx4/device.h
include/linux/netdevice.h
include/linux/ptp_classify.h
include/linux/rndis.h
include/linux/rtnetlink.h
include/linux/skbuff.h
include/linux/spi/cc2520.h [new file with mode: 0644]
include/linux/tcp.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/cfg80211.h
include/net/dcbnl.h
include/net/flow_keys.h
include/net/if_inet6.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ipv6.h
include/net/mac80211.h
include/net/mac802154.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_log.h
include/net/netfilter/xt_log.h [deleted file]
include/net/netns/conntrack.h
include/net/netns/ipv6.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sctp/command.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/sctp/ulpevent.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/udp_tunnel.h [new file with mode: 0644]
include/net/vxlan.h
include/uapi/linux/can/netlink.h
include/uapi/linux/if_link.h
include/uapi/linux/in6.h
include/uapi/linux/ipv6.h
include/uapi/linux/netdevice.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter_bridge/Kbuild
include/uapi/linux/netfilter_bridge/ebt_ulog.h [deleted file]
include/uapi/linux/netfilter_ipv4/Kbuild
include/uapi/linux/netfilter_ipv4/ipt_ULOG.h [deleted file]
include/uapi/linux/nl80211.h
include/uapi/linux/sctp.h
include/uapi/linux/sysctl.h
include/uapi/linux/tipc_config.h
kernel/sysctl_binary.c
lib/crc32.c
lib/dynamic_debug.c
lib/net_utils.c
net/802/fc.c
net/802/fddi.c
net/802/hippi.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/client.c
net/appletalk/ddp.c
net/appletalk/dev.c
net/atm/br2684.c
net/atm/clip.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/bluetooth/6lowpan.c
net/bluetooth/6lowpan.h [deleted file]
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebt_ulog.c [deleted file]
net/bridge/netfilter/nf_log_bridge.c [new file with mode: 0644]
net/caif/caif_socket.c
net/caif/cfctrl.c
net/core/dev.c
net/core/drop_monitor.c
net/core/filter.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/ptp_classifier.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/timestamping.c
net/dcb/dcbnl.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dsa/dsa.c
net/dsa/slave.c
net/ethernet/eth.c
net/hsr/Makefile
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_forward.c [new file with mode: 0644]
net/hsr/hsr_forward.h [new file with mode: 0644]
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/hsr/hsr_main.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/hsr/hsr_netlink.h
net/hsr/hsr_slave.c [new file with mode: 0644]
net/hsr/hsr_slave.h [new file with mode: 0644]
net/ieee802154/6lowpan_iphc.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/af_ieee802154.c
net/ieee802154/dgram.c
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/raw.c
net/ieee802154/reassembly.c
net/ieee802154/wpan-class.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/datagram.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_ULOG.c [deleted file]
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_log_arp.c [new file with mode: 0644]
net/ipv4/netfilter/nf_log_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/raw.c
net/ipv4/syncookies.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel.c [new file with mode: 0644]
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/nf_log_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/raw.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/af_irda.c
net/irda/irda_device.c
net/irda/irlan/irlan_common.c
net/irda/irlan/irlan_eth.c
net/irda/irlmp.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/Kconfig
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/mac80211/Kconfig
net/mac80211/Makefile
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ethtool.c [new file with mode: 0644]
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/pm.c
net/mac80211/rate.h
net/mac80211/rc80211_pid.h [deleted file]
net/mac80211/rc80211_pid_algo.c [deleted file]
net/mac80211/rc80211_pid_debugfs.c [deleted file]
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/wep.c
net/mac802154/ieee802154_dev.c
net/mac802154/llsec.c
net/mac802154/mib.c
net/mac802154/tx.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_log.c
net/netfilter/nf_log_common.c [new file with mode: 0644]
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_proto_common.c
net/netfilter/nf_nat_proto_dccp.c
net/netfilter/nf_nat_proto_sctp.c
net/netfilter/nf_nat_proto_tcp.c
net/netfilter/nf_nat_proto_udp.c
net/netfilter/nf_nat_proto_udplite.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_log.c
net/netfilter/x_tables.c
net/netfilter/xt_LOG.c
net/netlabel/netlabel_kapi.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/digital_dep.c
net/openvswitch/datapath.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-internal_dev.h
net/openvswitch/vport-vxlan.c
net/packet/af_packet.c
net/phonet/pep-gprs.c
net/rose/af_rose.c
net/rxrpc/ar-key.c
net/sched/act_mirred.c
net/sched/cls_api.c
net/sched/em_canid.c
net/sched/sch_generic.c
net/sched/sch_teql.c
net/sctp/Makefile
net/sctp/command.c [deleted file]
net/sctp/outqueue.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpevent.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/net.c
net/tipc/net.h
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/socket.h
net/wireless/core.c
net/wireless/ethtool.c
net/wireless/ethtool.h [deleted file]
net/wireless/nl80211.c
net/wireless/rdev-ops.h
net/wireless/trace.h

index 416c5d59f52eaf02081b88901cb0f165713e7e6a..d322b0581194a64a3e9791ef7864d17a95cce985 100644 (file)
@@ -1,3 +1,14 @@
+What:          /sys/class/net/<iface>/name_assign_type
+Date:          July 2014
+KernelVersion: 3.17
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the name assignment type. Possible values are:
+               1: enumerated by the kernel, possibly in an unpredictable way
+               2: predictably named by the kernel
+               3: named by userspace
+               4: renamed
+
 What:          /sys/class/net/<iface>/addr_assign_type
 Date:          July 2010
 KernelVersion: 3.2
index c183ea90d9bc5b08e7980d4e9bdb9dd386957077..aa7ad622259d991fbfdbfafa303285c09527ce6e 100644 (file)
@@ -4,7 +4,8 @@ Required properties:
 - compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
 - reg: address and length of the register set for the device.
 - interrupts: interrupts for the device, first cell must be for the the rx
-  interrupts, and the second cell should be for the transmit queues
+  interrupts, and the second cell should be for the transmit queues. An
+  optional third interrupt cell for Wake-on-LAN can be specified
 - local-mac-address: Ethernet MAC address (48 bits) of this adapter
 - phy-mode: Should be a string describing the PHY interface to the
   Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
index 72efaaf764f728479841b9941ee4e23c0ec52487..0369e25aabd2dd8cbc6be196130d845394b050ac 100644 (file)
@@ -1,8 +1,8 @@
-TI SoC Davinci MDIO Controller Device Tree Bindings
+TI SoC Davinci/Keystone2 MDIO Controller Device Tree Bindings
 ---------------------------------------------------
 
 Required properties:
-- compatible           : Should be "ti,davinci_mdio"
+- compatible           : Should be "ti,davinci_mdio" or "ti,keystone_mdio"
 - reg                  : physical base address and size of the davinci mdio
                          registers map
 - bus_freq             : Mdio Bus frequency
@@ -19,7 +19,7 @@ file.
 Examples:
 
        mdio: davinci_mdio@4A101000 {
-               compatible = "ti,cpsw";
+               compatible = "ti,davinci_mdio";
                reg = <0x4A101000 0x1000>;
                bus_freq = <1000000>;
        };
@@ -27,7 +27,7 @@ Examples:
 (or)
 
        mdio: davinci_mdio@4A101000 {
-               compatible = "ti,cpsw";
+               compatible = "ti,davinci_mdio";
                ti,hwmods = "davinci_mdio";
                bus_freq = <1000000>;
        };
diff --git a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
new file mode 100644 (file)
index 0000000..0071883
--- /dev/null
@@ -0,0 +1,29 @@
+*CC2520 IEEE 802.15.4 Compatible Radio*
+
+Required properties:
+       - compatible:           should be "ti,cc2520"
+       - spi-max-frequency:    maximal bus speed (8000000); should be set to 4000000,
+                               depending on sync or async operation mode
+       - reg:                  the chipselect index
+       - pinctrl-0:            pin control group to be used for this controller.
+       - pinctrl-names:        must contain a "default" entry.
+       - fifo-gpio:            GPIO spec for the FIFO pin
+       - fifop-gpio:           GPIO spec for the FIFOP pin
+       - sfd-gpio:             GPIO spec for the SFD pin
+       - cca-gpio:             GPIO spec for the CCA pin
+       - vreg-gpio:            GPIO spec for the VREG pin
+       - reset-gpio:           GPIO spec for the RESET pin
+Example:
+       cc2520@0 {
+               compatible = "ti,cc2520";
+               reg = <0>;
+               spi-max-frequency = <4000000>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&cc2520_cape_pins>;
+               fifo-gpio = <&gpio1 18 0>;
+               fifop-gpio = <&gpio1 19 0>;
+               sfd-gpio = <&gpio1 13 0>;
+               cca-gpio = <&gpio1 16 0>;
+               vreg-gpio = <&gpio0 31 0>;
+               reset-gpio = <&gpio1 12 0>;
+       };
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
new file mode 100644 (file)
index 0000000..aa4f423
--- /dev/null
@@ -0,0 +1,61 @@
+* Marvell Armada 375 Ethernet Controller (PPv2)
+
+Required properties:
+
+- compatible: should be "marvell,armada-375-pp2"
+- reg: addresses and length of the register sets for the device.
+  Must contain the following register sets:
+       - common controller registers
+       - LMS registers
+  In addition, at least one port register set is required.
+- clocks: a pointer to the reference clocks for this device, namely:
+       - main controller clock
+       - GOP clock
+- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
+
+The ethernet ports are represented by subnodes. At least one port is
+required.
+
+Required properties (port):
+
+- interrupts: interrupt for the port
+- port-id: should be '0' or '1' for ethernet ports, and '2' for the
+           loopback port
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties (port):
+
+- marvell,loopback: port is in loopback mode
+- phy: a phandle to a phy node defining the PHY address (as the reg
+  property, a single integer). Note: if this property isn't present,
+  then fixed link is assumed, and the 'fixed-link' property is
+  mandatory.
+
+Example:
+
+ethernet@f0000 {
+       compatible = "marvell,armada-375-pp2";
+       reg = <0xf0000 0xa000>,
+             <0xc0000 0x3060>,
+             <0xc4000 0x100>,
+             <0xc5000 0x100>;
+       clocks = <&gateclk 3>, <&gateclk 19>;
+       clock-names = "pp_clk", "gop_clk";
+       status = "okay";
+
+       eth0: eth0@c4000 {
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <0>;
+               status = "okay";
+               phy = <&phy0>;
+               phy-mode = "gmii";
+       };
+
+       eth1: eth1@c5000 {
+               interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <1>;
+               status = "okay";
+               phy = <&phy3>;
+               phy-mode = "gmii";
+       };
+};
index 9c723ecd00251534a0b011c4e0ffbd053242454e..eeb5b2e97bedac5ce910a06cc03bf42035d544d4 100644 (file)
@@ -542,10 +542,10 @@ mode
 
                XOR policy: Transmit based on the selected transmit
                hash policy.  The default policy is a simple [(source
-               MAC address XOR'd with destination MAC address) modulo
-               slave count].  Alternate transmit policies may be
-               selected via the xmit_hash_policy option, described
-               below.
+               MAC address XOR'd with destination MAC address XOR
+               packet type ID) modulo slave count].  Alternate transmit
+               policies may be selected via the xmit_hash_policy option,
+               described below.
 
                This mode provides load balancing and fault tolerance.
 
@@ -801,10 +801,11 @@ xmit_hash_policy
 
        layer2
 
-               Uses XOR of hardware MAC addresses to generate the
-               hash.  The formula is
+               Uses XOR of hardware MAC addresses and packet type ID
+               field to generate the hash. The formula is
 
-               (source MAC XOR destination MAC) modulo slave count
+               hash = source MAC XOR destination MAC XOR packet type ID
+               slave number = hash modulo slave count
 
                This algorithm will place all traffic to a particular
                network peer on the same slave.
@@ -819,7 +820,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and IP addresses to
                generate the hash.  The formula is
 
-               hash = source MAC XOR destination MAC
+               hash = source MAC XOR destination MAC XOR packet type ID
                hash = hash XOR source IP XOR destination IP
                hash = hash XOR (hash RSHIFT 16)
                hash = hash XOR (hash RSHIFT 8)
@@ -2301,13 +2302,13 @@ broadcast: Like active-backup, there is not much advantage to this
        bandwidth.  
 
        Additionally, the linux bonding 802.3ad implementation
-       distributes traffic by peer (using an XOR of MAC addresses),
-       so in a "gatewayed" configuration, all outgoing traffic will
-       generally use the same device.  Incoming traffic may also end
-       up on a single device, but that is dependent upon the
-       balancing policy of the peer's 8023.ad implementation.  In a
-       "local" configuration, traffic will be distributed across the
-       devices in the bond.
+       distributes traffic by peer (using an XOR of MAC addresses
+       and packet type ID), so in a "gatewayed" configuration, all
+       outgoing traffic will generally use the same device.  Incoming
+       traffic may also end up on a single device, but that is
+       dependent upon the balancing policy of the peer's 802.3ad
+       implementation.  In a "local" configuration, traffic will be
+       distributed across the devices in the bond.
 
        Finally, the 802.3ad mode mandates the use of the MII monitor,
        therefore, the ARP monitor is not available in this mode.
index ab42c95f9985c0172109308c052cfe426a5bed9a..f35bfe43bf7abac26690328c5332d154023fd818 100644 (file)
@@ -1132,6 +1132,15 @@ flowlabel_consistency - BOOLEAN
        FALSE: disabled
        Default: TRUE
 
+auto_flowlabels - BOOLEAN
+       Automatically generate flow labels based on a flow hash
+       of the packet. This allows intermediate devices, such as routers,
+       to identify packet flows for mechanisms like Equal Cost Multipath
+       Routing (see RFC 6438).
+       TRUE: enabled
+       FALSE: disabled
+       Default: FALSE
+
 anycast_src_echo_reply - BOOLEAN
        Controls the use of anycast addresses as source addresses for ICMPv6
        echo reply
@@ -1210,6 +1219,18 @@ accept_ra_defrtr - BOOLEAN
        Functional default: enabled if accept_ra is enabled.
                            disabled if accept_ra is disabled.
 
+accept_ra_from_local - BOOLEAN
+       Accept RA with a source address that is found on the local machine
+       if the RA is otherwise proper and able to be accepted.
+       Default is to NOT accept these as it may be an unintended
+       network loop.
+
+       Functional default:
+           enabled if accept_ra_from_local is enabled
+               on a specific interface.
+           disabled if accept_ra_from_local is disabled
+               on a specific interface.
+
 accept_ra_pinfo - BOOLEAN
        Learn Prefix Information in Router Advertisement.
 
index 0e30c7845b2b316cb4bb7cdf32807aaf18a55610..0dffc6e3790215b90b3208e50cd8316222289bd4 100644 (file)
@@ -24,6 +24,34 @@ For monitoring and control pktgen creates:
         /proc/net/pktgen/ethX
 
 
+Tuning NIC for max performance
+==============================
+
+The default NIC settings are (likely) not tuned for pktgen's artificial
+overload type of benchmarking, as this could hurt the normal use-case.
+
+Specifically increasing the TX ring buffer in the NIC:
+ # ethtool -G ethX tx 1024
+
+A larger TX ring can improve pktgen's performance, while it can hurt
+in the general case, 1) because the TX ring buffer might get larger
+than the CPU's L1/L2 cache, 2) because it allows more queueing in the
+NIC HW layer (which is bad for bufferbloat).
+
+One should be careful about concluding that packets/descriptors in the
+HW TX ring cause delay.  Drivers usually delay cleaning up the
+ring-buffers (for various performance reasons), thus packets stalling
+in the TX ring might just be waiting for cleanup.
+
+This cleanup issue is specifically the case for the driver ixgbe
+(Intel 82599 chip).  This driver (ixgbe) combines TX+RX ring cleanups,
+and the cleanup interval is affected by the ethtool --coalesce setting
+of parameter "rx-usecs".
+
+For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
+ # ethtool -C ethX rx-usecs 30
+
+
 Viewing threads
 ===============
 /proc/net/pktgen/kpktgend_0 
index bc35541249032c4d64c8888741eddc64f87e4f2e..8b4ad809df277f9ac44604f71896bf4e6d51eadb 100644 (file)
@@ -40,7 +40,7 @@ the set bits correspond to data that is available, then the control
 message will not be generated:
 
 SOF_TIMESTAMPING_SOFTWARE:     report systime if available
-SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available (deprecated)
 SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
 
 It is worth noting that timestamps may be collected for reasons other
@@ -94,7 +94,13 @@ not perfect; as a consequence, sorting packets received via different
 NICs by their hwtimetrans may differ from the order in which they were
 received. hwtimetrans may be non-monotonic even for the same NIC.
 Filled in if SOF_TIMESTAMPING_SYS_HARDWARE is set. Requires support
-by the network device and will be empty without that support.
+by the network device and will be empty without that support. This
+field is DEPRECATED. Only one driver computes this value. New device
+drivers must leave this zero. Instead, they can expose the hardware
+clock device on the NIC directly as a HW PTP clock source, to allow
+time conversion in userspace and optionally synchronize system time
+with a userspace PTP stack such as linuxptp. For the PTP clock API,
+see Documentation/ptp/ptp.txt.
 
 
 SIOCSHWTSTAMP, SIOCGHWTSTAMP:
index d76e07798e052b8fcb558a3fb72d0321530d3f6d..78215a5dea28c889c8b8bca0f45c1127bf232830 100644 (file)
@@ -1925,7 +1925,8 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/genet/
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:     Michael Chan <mchan@broadcom.com>
+M:     Sony Chacko <sony.chacko@qlogic.com>
+M:     Dept-HSGLinuxNICDev@qlogic.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2.*
@@ -1970,7 +1971,7 @@ F:        arch/arm/boot/dts/bcm5301x.dtsi
 F:     arch/arm/boot/dts/bcm470*
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
-M:     Nithin Nayak Sujir <nsujir@broadcom.com>
+M:     Prashant Sreedharan <prashant@broadcom.com>
 M:     Michael Chan <mchan@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -5655,16 +5656,6 @@ F:       Documentation/networking/mac80211-injection.txt
 F:     include/net/mac80211.h
 F:     net/mac80211/
 
-MAC80211 PID RATE CONTROL
-M:     Stefano Brivio <stefano.brivio@polimi.it>
-M:     Mattias Nissler <mattias.nissler@gmx.de>
-L:     linux-wireless@vger.kernel.org
-W:     http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
-S:     Maintained
-F:     net/mac80211/rc80211_pid*
-
 MACVLAN DRIVER
 M:     Patrick McHardy <kaber@trash.net>
 L:     netdev@vger.kernel.org
index 734b32f09c0a21aa7ef734100400a47f13f485ad..91290f7f61b8c2b903d874345b6c32896da6114a 100644 (file)
@@ -3,6 +3,7 @@ bcma-y                                  += driver_chipcommon.o driver_chipcommon_pmu.o
 bcma-$(CONFIG_BCMA_SFLASH)             += driver_chipcommon_sflash.o
 bcma-$(CONFIG_BCMA_NFLASH)             += driver_chipcommon_nflash.o
 bcma-y                                 += driver_pci.o
+bcma-y                                 += driver_pcie2.o
 bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE)        += driver_pci_host.o
 bcma-$(CONFIG_BCMA_DRIVER_MIPS)                += driver_mips.o
 bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN)    += driver_gmac_cmn.o
index d7f81ad56b8af731ea10fb863460cebb717dddc0..aec9f850b4a80eba3ea7c0b06bd5df647829980e 100644 (file)
@@ -220,6 +220,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 #endif
        switch (cc->core->bus->chipinfo.id) {
        case BCMA_CHIP_ID_BCM5357:
+       case BCMA_CHIP_ID_BCM53572:
                chip->ngpio     = 32;
                break;
        default:
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
new file mode 100644 (file)
index 0000000..e4be537
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Broadcom specific AMBA
+ * PCIe Gen 2 Core
+ *
+ * Copyright 2014, Broadcom Corporation
+ * Copyright 2014, RafaÅ‚ MiÅ‚ecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+#if 0
+static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
+{
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
+       pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
+       return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
+}
+#endif
+
+static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 addr,
+                                     u32 val)
+{
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, val);
+}
+
+/**************************************************
+ * Init.
+ **************************************************/
+
+static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
+                                               bool enable)
+{
+       u32 val;
+
+       /* restore back to default */
+       val = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
+       val |= PCIE2_CLKC_DLYPERST;
+       val &= ~PCIE2_CLKC_DISSPROMLD;
+       if (enable) {
+               val &= ~PCIE2_CLKC_DLYPERST;
+               val |= PCIE2_CLKC_DISSPROMLD;
+       }
+       pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), val);
+       /* flush */
+       return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
+}
+
+static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
+{
+       /* LTR0 */
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x844);
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x883c883c);
+       /* LTR1 */
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x848);
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x88648864);
+       /* LTR2 */
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x84C);
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x90039003);
+}
+
+static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
+{
+       u8 core_rev = pcie2->core->id.rev;
+       u32 devstsctr2;
+
+       if (core_rev < 2 || core_rev == 10 || core_rev > 13)
+               return;
+
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+                     PCIE2_CAP_DEVSTSCTRL2_OFFSET);
+       devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
+       if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
+               /* force the right LTR values */
+               bcma_core_pcie2_set_ltr_vals(pcie2);
+
+               /* TODO:
+               si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+
+               /* enable the LTR */
+               devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+                             PCIE2_CAP_DEVSTSCTRL2_OFFSET);
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);
+
+               /* set the LTR state to be active */
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
+                             PCIE2_LTR_ACTIVE);
+               usleep_range(1000, 2000);
+
+               /* set the LTR state to be sleep */
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
+                             PCIE2_LTR_SLEEP);
+               usleep_range(1000, 2000);
+       }
+}
+
+static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
+{
+       u8 core_rev = pcie2->core->id.rev;
+       bool pciewar160, pciewar162;
+
+       pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
+       pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
+                    core_rev == 9 || core_rev == 11;
+
+       if (!pciewar160 && !pciewar162)
+               return;
+
+/* TODO */
+#if 0
+       pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
+                   PCIE_DISABLE_L1CLK_GATING);
+#if 0
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+                     PCIEGEN2_COE_PVT_TL_CTRL_0);
+       pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
+                    ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
+#endif
+#endif
+}
+
+static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
+{
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
+       pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
+}
+
+static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
+{
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
+       pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
+}
+
+static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
+{
+       struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
+       u8 core_rev = pcie2->core->id.rev;
+       u32 alp_khz, pm_value;
+
+       if (core_rev <= 13) {
+               alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
+               pm_value = (1000000 * 2) / alp_khz;
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+                             PCIE2_PVT_REG_PM_CLK_PERIOD);
+               pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
+       }
+}
+
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+       struct bcma_chipinfo *ci = &pcie2->core->bus->chipinfo;
+       u32 tmp;
+
+       tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
+       if ((tmp & 0xe) >> 1 == 2)
+               bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
+
+       /* TODO: Do we need pcie_reqsize? */
+
+       if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
+               bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
+       bcma_core_pcie2_hw_ltr_war(pcie2);
+       pciedev_crwlpciegen2(pcie2);
+       pciedev_reg_pm_clk_period(pcie2);
+       pciedev_crwlpciegen2_180(pcie2);
+       pciedev_crwlpciegen2_182(pcie2);
+}
index 34ea4c588d36bd798ee83bc9db6273eba6baf034..0ff8d58831ef30fcbe7a5452baa8b00e3fdababa 100644 (file)
@@ -132,6 +132,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
                case BCMA_CORE_CHIPCOMMON:
                case BCMA_CORE_PCI:
                case BCMA_CORE_PCIE:
+               case BCMA_CORE_PCIE2:
                case BCMA_CORE_MIPS_74K:
                case BCMA_CORE_4706_MAC_GBIT_COMMON:
                        continue;
@@ -281,6 +282,13 @@ int bcma_bus_register(struct bcma_bus *bus)
                bcma_core_pci_init(&bus->drv_pci[1]);
        }
 
+       /* Init PCIe Gen 2 core */
+       core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
+       if (core) {
+               bus->drv_pcie2.core = core;
+               bcma_core_pcie2_init(&bus->drv_pcie2);
+       }
+
        /* Init GBIT MAC COMMON core */
        core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
        if (core) {
index f5ce64e03fd7bad524d6c172afcd204854202195..fa7fd62ddffa429234323a4b0df7d5b1c5c5c6ed 100644 (file)
@@ -30,8 +30,8 @@ config BT_HCIUART
        help
          Bluetooth HCI UART driver.
          This driver is required if you want to use Bluetooth devices with
-         serial port interface. You will also need this driver if you have 
-         UART based Bluetooth PCMCIA and CF devices like Xircom Credit Card 
+         serial port interface. You will also need this driver if you have
+         UART based Bluetooth PCMCIA and CF devices like Xircom Credit Card
          adapter and BrainBoxes Bluetooth PC Card.
 
          Say Y here to compile support for Bluetooth UART devices into the
@@ -41,9 +41,9 @@ config BT_HCIUART_H4
        bool "UART (H4) protocol support"
        depends on BT_HCIUART
        help
-         UART (H4) is serial protocol for communication between Bluetooth 
-         device and host. This protocol is required for most Bluetooth devices 
-         with UART interface, including PCMCIA and CF cards. 
+         UART (H4) is serial protocol for communication between Bluetooth
+         device and host. This protocol is required for most Bluetooth devices
+         with UART interface, including PCMCIA and CF cards.
 
          Say Y here to compile support for HCI UART (H4) protocol.
 
@@ -52,7 +52,7 @@ config BT_HCIUART_BCSP
        depends on BT_HCIUART
        select BITREVERSE
        help
-         BCSP (BlueCore Serial Protocol) is serial protocol for communication 
+         BCSP (BlueCore Serial Protocol) is serial protocol for communication
          between Bluetooth device and host. This protocol is required for non
          USB Bluetooth devices based on CSR BlueCore chip, including PCMCIA and
          CF cards.
index f50dffc0374fb4ca9222d75683523c73a9dfce43..230c552daf91db15f38c089b9a64fe7c8a4a045f 100644 (file)
@@ -103,6 +103,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x13d3, 0x3393) },
        { USB_DEVICE(0x13d3, 0x3402) },
+       { USB_DEVICE(0x13d3, 0x3432) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@ -152,6 +153,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
@@ -288,10 +290,10 @@ static int ath3k_load_fwfile(struct usb_device *udev,
        sent += size;
        count -= size;
 
+       pipe = usb_sndbulkpipe(udev, 0x02);
+
        while (count) {
                size = min_t(uint, count, BULK_SIZE);
-               pipe = usb_sndbulkpipe(udev, 0x02);
-
                memcpy(send_buf, firmware->data + sent, size);
 
                err = usb_bulk_msg(udev, pipe, send_buf, size,
index dc79f88f8717f478c8d8ab6d0a7d45849f772ac1..caf684119a4e38d5da089fcebf9289f0acb89c0d 100644 (file)
@@ -68,6 +68,7 @@ struct btmrvl_adapter {
        u8 hs_state;
        u8 wakeup_tries;
        wait_queue_head_t cmd_wait_q;
+       wait_queue_head_t event_hs_wait_q;
        u8 cmd_complete;
        bool is_suspended;
 };
@@ -89,6 +90,7 @@ struct btmrvl_private {
 #define MRVL_VENDOR_PKT                        0xFE
 
 /* Vendor specific Bluetooth commands */
+#define BT_CMD_PSCAN_WIN_REPORT_ENABLE 0xFC03
 #define BT_CMD_AUTO_SLEEP_MODE         0xFC23
 #define BT_CMD_HOST_SLEEP_CONFIG       0xFC59
 #define BT_CMD_HOST_SLEEP_ENABLE       0xFC5A
@@ -143,6 +145,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
 int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
 
 int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd);
+int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd);
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv);
 int btmrvl_enable_ps(struct btmrvl_private *priv);
 int btmrvl_prepare_command(struct btmrvl_private *priv);
index e9dbddb0b8f1efb1f15ede65f50d80390ca370c3..cc65fd2fe856cb992b81da7c9f45e3f509757a54 100644 (file)
@@ -114,6 +114,7 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
                        adapter->hs_state = HS_ACTIVATED;
                        if (adapter->psmode)
                                adapter->ps_state = PS_SLEEP;
+                       wake_up_interruptible(&adapter->event_hs_wait_q);
                        BT_DBG("HS ACTIVATED!");
                } else {
                        BT_DBG("HS Enable failed");
@@ -214,6 +215,23 @@ int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd)
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
 
+int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd)
+{
+       struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
+       int ret;
+
+       if (!card->support_pscan_win_report)
+               return 0;
+
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_PSCAN_WIN_REPORT_ENABLE,
+                                  &subcmd, 1);
+       if (ret)
+               BT_ERR("PSCAN_WIN_REPORT_ENABLE command failed: %#x", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(btmrvl_pscan_window_reporting);
+
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
 {
        int ret;
@@ -253,11 +271,31 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
 
 int btmrvl_enable_hs(struct btmrvl_private *priv)
 {
+       struct btmrvl_adapter *adapter = priv->adapter;
        int ret;
 
        ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
-       if (ret)
+       if (ret) {
                BT_ERR("Host sleep enable command failed\n");
+               return ret;
+       }
+
+       ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
+                                              adapter->hs_state,
+                       msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
+       if (ret < 0) {
+               BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
+                      ret, adapter->hs_state, adapter->ps_state,
+                      adapter->wakeup_tries);
+       } else if (!ret) {
+               BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state,
+                      adapter->ps_state, adapter->wakeup_tries);
+               ret = -ETIMEDOUT;
+       } else {
+               BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state,
+                      adapter->ps_state, adapter->wakeup_tries);
+               ret = 0;
+       }
 
        return ret;
 }
@@ -358,6 +396,7 @@ static void btmrvl_init_adapter(struct btmrvl_private *priv)
        }
 
        init_waitqueue_head(&priv->adapter->cmd_wait_q);
+       init_waitqueue_head(&priv->adapter->event_hs_wait_q);
 }
 
 static void btmrvl_free_adapter(struct btmrvl_private *priv)
@@ -489,6 +528,8 @@ static int btmrvl_setup(struct hci_dev *hdev)
 
        btmrvl_cal_data_dt(priv);
 
+       btmrvl_pscan_window_reporting(priv, 0x01);
+
        priv->btmrvl_dev.psmode = 1;
        btmrvl_enable_ps(priv);
 
@@ -666,6 +707,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv)
        hdev = priv->btmrvl_dev.hcidev;
 
        wake_up_interruptible(&priv->adapter->cmd_wait_q);
+       wake_up_interruptible(&priv->adapter->event_hs_wait_q);
 
        kthread_stop(priv->main_thread.task);
 
index 9dedca516ff50567a278fb9a511dbcdfd7a1980c..efff06438b024cc2829777e2df7d3788cc9566ed 100644 (file)
@@ -108,6 +108,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
        .helper         = "mrvl/sd8688_helper.bin",
        .firmware       = "mrvl/sd8688.bin",
        .reg            = &btmrvl_reg_8688,
+       .support_pscan_win_report = false,
        .sd_blksz_fw_dl = 64,
 };
 
@@ -115,6 +116,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8787_uapsta.bin",
        .reg            = &btmrvl_reg_87xx,
+       .support_pscan_win_report = false,
        .sd_blksz_fw_dl = 256,
 };
 
@@ -122,6 +124,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8797_uapsta.bin",
        .reg            = &btmrvl_reg_87xx,
+       .support_pscan_win_report = false,
        .sd_blksz_fw_dl = 256,
 };
 
@@ -129,6 +132,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8897_uapsta.bin",
        .reg            = &btmrvl_reg_88xx,
+       .support_pscan_win_report = true,
        .sd_blksz_fw_dl = 256,
 };
 
@@ -1067,6 +1071,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
                card->firmware = data->firmware;
                card->reg = data->reg;
                card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
+               card->support_pscan_win_report = data->support_pscan_win_report;
        }
 
        if (btmrvl_sdio_register_dev(card) < 0) {
index d4dd3b0fa53d16d68da0101494e43ca4664716c2..453559f98a75e4a895bf1a1086e1d5e754c2509a 100644 (file)
@@ -89,6 +89,7 @@ struct btmrvl_sdio_card {
        const char *helper;
        const char *firmware;
        const struct btmrvl_sdio_card_reg *reg;
+       bool support_pscan_win_report;
        u16 sd_blksz_fw_dl;
        u8 rx_unit;
        struct btmrvl_private *priv;
@@ -98,6 +99,7 @@ struct btmrvl_sdio_device {
        const char *helper;
        const char *firmware;
        const struct btmrvl_sdio_card_reg *reg;
+       const bool support_pscan_win_report;
        u16 sd_blksz_fw_dl;
 };
 
index 6250fc2fb93a7257697fa2efe34acfae204dfe7f..ed7b33b06b43929503dd9e17517988aad4142a79 100644 (file)
@@ -30,9 +30,6 @@
 
 #define VERSION "0.6"
 
-static bool ignore_dga;
-static bool ignore_csr;
-static bool ignore_sniffer;
 static bool disable_scofix;
 static bool force_scofix;
 
@@ -49,7 +46,8 @@ static struct usb_driver btusb_driver;
 #define BTUSB_WRONG_SCO_MTU    0x40
 #define BTUSB_ATH3012          0x80
 #define BTUSB_INTEL            0x100
-#define BTUSB_BCM_PATCHRAM     0x200
+#define BTUSB_INTEL_BOOT       0x200
+#define BTUSB_BCM_PATCHRAM     0x400
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
@@ -121,6 +119,10 @@ static const struct usb_device_id btusb_table[] = {
        /* IMC Networks - Broadcom based */
        { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
 
+       /* Intel Bluetooth USB Bootloader (RAM module) */
+       { USB_DEVICE(0x8087, 0x0a5a),
+         .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+
        { }     /* Terminating entry */
 };
 
@@ -175,6 +177,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -228,10 +231,12 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
 
        /* CSR BlueCore Bluetooth Sniffer */
-       { USB_DEVICE(0x0a12, 0x0002), .driver_info = BTUSB_SNIFFER },
+       { USB_DEVICE(0x0a12, 0x0002),
+         .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
 
        /* Frontline ComProbe Bluetooth Sniffer */
-       { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER },
+       { USB_DEVICE(0x16d3, 0x0002),
+         .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
 
        /* Intel Bluetooth device */
        { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
@@ -1182,6 +1187,51 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
        return 0;
 }
 
+#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+
+static int btusb_check_bdaddr_intel(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       struct hci_rp_read_bd_addr *rp;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s reading Intel device address failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*rp)) {
+               BT_ERR("%s Intel device address length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       rp = (struct hci_rp_read_bd_addr *) skb->data;
+       if (rp->status) {
+               BT_ERR("%s Intel device address result failed (%02x)",
+                      hdev->name, rp->status);
+               kfree_skb(skb);
+               return -bt_to_errno(rp->status);
+       }
+
+       /* For some Intel based controllers, the default Bluetooth device
+        * address 00:03:19:9E:8B:00 can be found. These controllers are
+        * fully operational, but have the danger of duplicate addresses
+        * and that in turn can cause problems with Bluetooth operation.
+        */
+       if (!bacmp(&rp->bdaddr, BDADDR_INTEL)) {
+               BT_ERR("%s found Intel default device address (%pMR)",
+                      hdev->name, &rp->bdaddr);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_setup_intel(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -1254,6 +1304,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
                BT_INFO("%s: Intel device is already patched. patch num: %02x",
                        hdev->name, ver->fw_patch_num);
                kfree_skb(skb);
+               btusb_check_bdaddr_intel(hdev);
                return 0;
        }
 
@@ -1266,6 +1317,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        fw = btusb_setup_intel_get_fw(hdev, ver);
        if (!fw) {
                kfree_skb(skb);
+               btusb_check_bdaddr_intel(hdev);
                return 0;
        }
        fw_ptr = fw->data;
@@ -1345,6 +1397,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
                hdev->name);
 
+       btusb_check_bdaddr_intel(hdev);
        return 0;
 
 exit_mfg_disable:
@@ -1359,6 +1412,8 @@ exit_mfg_disable:
        kfree_skb(skb);
 
        BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
+
+       btusb_check_bdaddr_intel(hdev);
        return 0;
 
 exit_mfg_deactivate:
@@ -1379,9 +1434,29 @@ exit_mfg_deactivate:
        BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
                hdev->name);
 
+       btusb_check_bdaddr_intel(hdev);
+       return 0;
+}
+
+static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       long ret;
+
+       skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               BT_ERR("%s: changing Intel device address failed (%ld)",
+                       hdev->name, ret);
+               return ret;
+       }
+       kfree_skb(skb);
+
        return 0;
 }
 
+#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
+
 static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
 {
        struct btusb_data *data = hci_get_drvdata(hdev);
@@ -1395,6 +1470,7 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
        u16 opcode;
        struct sk_buff *skb;
        struct hci_rp_read_local_version *ver;
+       struct hci_rp_read_bd_addr *bda;
        long ret;
 
        snprintf(fw_name, sizeof(fw_name), "brcm/%s-%04x-%04x.hcd",
@@ -1404,8 +1480,7 @@ static int btusb_setup_bcm_patchram(struct hci_dev *hdev)
 
        ret = request_firmware(&fw, fw_name, &hdev->dev);
        if (ret < 0) {
-               BT_INFO("%s: BCM: patch %s not found", hdev->name,
-                       fw_name);
+               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
                return 0;
        }
 
@@ -1524,12 +1599,67 @@ reset_fw:
                ver->lmp_ver, ver->lmp_subver);
        kfree_skb(skb);
 
+       /* Read BD Address */
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               BT_ERR("%s: HCI_OP_READ_BD_ADDR failed (%ld)",
+                       hdev->name, ret);
+               goto done;
+       }
+
+       if (skb->len != sizeof(*bda)) {
+               BT_ERR("%s: HCI_OP_READ_BD_ADDR event length mismatch",
+                       hdev->name);
+               kfree_skb(skb);
+               ret = -EIO;
+               goto done;
+       }
+
+       bda = (struct hci_rp_read_bd_addr *) skb->data;
+       if (bda->status) {
+               BT_ERR("%s: HCI_OP_READ_BD_ADDR error status (%02x)",
+                      hdev->name, bda->status);
+               kfree_skb(skb);
+               ret = -bt_to_errno(bda->status);
+               goto done;
+       }
+
+       /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
+        * with no configured address.
+        */
+       if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0)) {
+               BT_INFO("%s: BCM: using default device address (%pMR)",
+                       hdev->name, &bda->bdaddr);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       kfree_skb(skb);
+
 done:
        release_firmware(fw);
 
        return ret;
 }
 
+static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       long ret;
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               ret = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Change address command failed (%ld)",
+                       hdev->name, ret);
+               return ret;
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_probe(struct usb_interface *intf,
                                const struct usb_device_id *id)
 {
@@ -1554,15 +1684,6 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info == BTUSB_IGNORE)
                return -ENODEV;
 
-       if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER)
-               return -ENODEV;
-
-       if (ignore_csr && id->driver_info & BTUSB_CSR)
-               return -ENODEV;
-
-       if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
-               return -ENODEV;
-
        if (id->driver_info & BTUSB_ATH3012) {
                struct usb_device *udev = interface_to_usbdev(intf);
 
@@ -1635,11 +1756,18 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_BCM92035)
                hdev->setup = btusb_setup_bcm92035;
 
-       if (id->driver_info & BTUSB_BCM_PATCHRAM)
+       if (id->driver_info & BTUSB_BCM_PATCHRAM) {
                hdev->setup = btusb_setup_bcm_patchram;
+               hdev->set_bdaddr = btusb_set_bdaddr_bcm;
+       }
 
-       if (id->driver_info & BTUSB_INTEL)
+       if (id->driver_info & BTUSB_INTEL) {
                hdev->setup = btusb_setup_intel;
+               hdev->set_bdaddr = btusb_set_bdaddr_intel;
+       }
+
+       if (id->driver_info & BTUSB_INTEL_BOOT)
+               set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
 
        /* Interface numbers are hardcoded in the specification */
        data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -1679,8 +1807,18 @@ static int btusb_probe(struct usb_interface *intf,
                /* New sniffer firmware has crippled HCI interface */
                if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
                        set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+       }
 
-               data->isoc = NULL;
+       if (id->driver_info & BTUSB_INTEL_BOOT) {
+               /* A bug in the bootloader causes that interrupt interface is
+                * only enabled after receiving SetInterface(0, AltSetting=0).
+                */
+               err = usb_set_interface(data->udev, 0, 0);
+               if (err < 0) {
+                       BT_ERR("failed to set interface 0, alt 0 %d", err);
+                       hci_free_dev(hdev);
+                       return err;
+               }
        }
 
        if (data->isoc) {
@@ -1845,15 +1983,6 @@ static struct usb_driver btusb_driver = {
 
 module_usb_driver(btusb_driver);
 
-module_param(ignore_dga, bool, 0644);
-MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
-
-module_param(ignore_csr, bool, 0644);
-MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
-
-module_param(ignore_sniffer, bool, 0644);
-MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
-
 module_param(disable_scofix, bool, 0644);
 MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
 
index fede8ca7147c8bbc778f1a25ec15501ed5f15f99..caacb422995dd4a69e18585275f32904a17bf708 100644 (file)
@@ -355,10 +355,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
 
 static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
 {
-       struct h5 *h5 = hu->priv;
-
        h5_complete_rx_pkt(hu);
-       h5_reset_rx(h5);
 
        return 0;
 }
@@ -373,7 +370,6 @@ static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
                h5->rx_pending = 2;
        } else {
                h5_complete_rx_pkt(hu);
-               h5_reset_rx(h5);
        }
 
        return 0;
index add1c6a720637a4009e35bb2b0a9b4e605fb59b8..5bb5872ffee60c060232eb5ddcab7957125b0161 100644 (file)
@@ -40,7 +40,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "1.4"
+#define VERSION "1.5"
 
 static bool amp;
 
@@ -95,10 +95,21 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
-static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+static int vhci_create_device(struct vhci_data *data, __u8 opcode)
 {
        struct hci_dev *hdev;
        struct sk_buff *skb;
+       __u8 dev_type;
+
+       /* bits 0-1 are dev_type (BR/EDR or AMP) */
+       dev_type = opcode & 0x03;
+
+       if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+               return -EINVAL;
+
+       /* bits 2-5 are reserved (must be zero) */
+       if (opcode & 0x3c)
+               return -EINVAL;
 
        skb = bt_skb_alloc(4, GFP_KERNEL);
        if (!skb)
@@ -121,6 +132,14 @@ static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
        hdev->flush = vhci_flush;
        hdev->send  = vhci_send_frame;
 
+       /* bit 6 is for external configuration */
+       if (opcode & 0x40)
+               set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+
+       /* bit 7 is for raw device */
+       if (opcode & 0x80)
+               set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
                hci_free_dev(hdev);
@@ -132,7 +151,7 @@ static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
        bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
 
        *skb_put(skb, 1) = 0xff;
-       *skb_put(skb, 1) = dev_type;
+       *skb_put(skb, 1) = opcode;
        put_unaligned_le16(hdev->id, skb_put(skb, 2));
        skb_queue_tail(&data->readq, skb);
 
@@ -146,7 +165,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 {
        size_t len = iov_length(iov, count);
        struct sk_buff *skb;
-       __u8 pkt_type, dev_type;
+       __u8 pkt_type, opcode;
        unsigned long i;
        int ret;
 
@@ -190,7 +209,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 
                cancel_delayed_work_sync(&data->open_timeout);
 
-               dev_type = *((__u8 *) skb->data);
+               opcode = *((__u8 *) skb->data);
                skb_pull(skb, 1);
 
                if (skb->len > 0) {
@@ -200,10 +219,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 
                kfree_skb(skb);
 
-               if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
-                       return -EINVAL;
-
-               ret = vhci_create_device(data, dev_type);
+               ret = vhci_create_device(data, opcode);
                break;
 
        default:
index c3986452194dd01ee4fed4b4bff14f9f5813523d..2c68da1ceeeef9ac39c272e0309dbb7ed9c85514 100644 (file)
@@ -1460,7 +1460,8 @@ static int fwnet_probe(struct fw_unit *unit,
                goto have_dev;
        }
 
-       net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+       net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN,
+                          fwnet_init_dev);
        if (net == NULL) {
                mutex_unlock(&fwnet_device_mutex);
                return -ENOMEM;
index ce4be3738d468a58aeff47a54737039cdafe3b0e..737fa2e0e782082c1ed137823c509327e5b2a123 100644 (file)
@@ -1115,7 +1115,7 @@ static int ssi_protocol_probe(struct device *dev)
                goto out;
        }
 
-       ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup);
+       ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
        if (!ssi->netdev) {
                dev_err(dev, "No memory for netdev\n");
                err = -ENOMEM;
index 8af33cf1fc4e85eb120c4e80578f7bb3088d9f41..2d5cbf4363e4de0758a4d7260341a136b3ba2544 100644 (file)
@@ -734,7 +734,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
        /* change ethxxx to iwxxx */
        strcpy(name, "iw");
        strcat(name, &c2dev->netdev->name[3]);
-       netdev = alloc_netdev(0, name, setup);
+       netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
        if (!netdev) {
                printk(KERN_ERR PFX "%s -  etherdev alloc failed",
                        __func__);
index 768a0fb67dd6d5995545f40037aa2c5157548ce1..c2fb71c182a8f0a2a968446813898c42b9e476a7 100644 (file)
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+                "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -474,7 +475,8 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
                                          16)) | FW_WR_FLOWID(ep->hwtid));
 
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-       flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
+       flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+                                           (ep->com.dev->rdev.lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@ -821,6 +823,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
        if (mpa_rev_to_use == 2) {
                mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
                                               sizeof (struct mpa_v2_conn_params));
+               PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+                    ep->ord);
                mpa_v2_params.ird = htons((u16)ep->ird);
                mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1190,8 +1194,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
                        sizeof(struct mpa_v2_conn_params);
        } else {
                /* this means MPA_v1 is used. Send max supported */
-               event.ord = c4iw_max_read_depth;
-               event.ird = c4iw_max_read_depth;
+               event.ord = cur_max_read_depth(ep->com.dev);
+               event.ird = cur_max_read_depth(ep->com.dev);
                event.private_data_len = ep->plen;
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        }
@@ -1255,6 +1259,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
        struct mpa_message *mpa;
@@ -1366,17 +1372,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                                MPA_V2_IRD_ORD_MASK;
                        resp_ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+                            __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
                        /*
                         * This is a double-check. Ideally, below checks are
                         * not required since ird/ord stuff has been taken
                         * care of in c4iw_accept_cr
                         */
-                       if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+                       if (ep->ird < resp_ord) {
+                               if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+                                   ep->com.dev->rdev.lldi.max_ordird_qp)
+                                       ep->ird = resp_ord;
+                               else
+                                       insuff_ird = 1;
+                       } else if (ep->ird > resp_ord) {
+                               ep->ird = resp_ord;
+                       }
+                       if (ep->ord > resp_ird) {
+                               if (RELAXED_IRD_NEGOTIATION)
+                                       ep->ord = resp_ird;
+                               else
+                                       insuff_ird = 1;
+                       }
+                       if (insuff_ird) {
                                err = -ENOMEM;
                                ep->ird = resp_ord;
                                ep->ord = resp_ird;
-                               insuff_ird = 1;
                        }
 
                        if (ntohs(mpa_v2_params->ird) &
@@ -1579,6 +1601,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
                                MPA_V2_IRD_ORD_MASK;
                        ep->ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+                            ep->ord);
                        if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
                                if (peer2peer) {
                                        if (ntohs(mpa_v2_params->ord) &
@@ -1798,6 +1822,20 @@ static int is_neg_adv(unsigned int status)
               status == CPL_ERR_KEEPALV_NEG_ADVICE;
 }
 
+static char *neg_adv_str(unsigned int status)
+{
+       switch (status) {
+       case CPL_ERR_RTX_NEG_ADVICE:
+               return "Retransmit timeout";
+       case CPL_ERR_PERSIST_NEG_ADVICE:
+               return "Persist timeout";
+       case CPL_ERR_KEEPALV_NEG_ADVICE:
+               return "Keepalive timeout";
+       default:
+               return "Unknown";
+       }
+}
+
 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 {
        ep->snd_win = snd_win;
@@ -1996,8 +2034,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
             status, status2errno(status));
 
        if (is_neg_adv(status)) {
-               printk(KERN_WARNING MOD "Connection problems for atid %u\n",
-                       atid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Connection problems for atid %u status %u (%s)\n",
+                        atid, status, neg_adv_str(status));
                return 0;
        }
 
@@ -2472,8 +2511,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = lookup_tid(t, tid);
        if (is_neg_adv(req->status)) {
-               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-                    ep->hwtid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Negative advice on abort - tid %u status %d (%s)\n",
+                        ep->hwtid, req->status, neg_adv_str(req->status));
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -2731,8 +2771,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        BUG_ON(!qp);
 
        set_bit(ULP_ACCEPT, &ep->com.history);
-       if ((conn_param->ord > c4iw_max_read_depth) ||
-           (conn_param->ird > c4iw_max_read_depth)) {
+       if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+           (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;
@@ -2740,31 +2780,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
-                       ep->ird = conn_param->ird;
-                       ep->ord = conn_param->ord;
-                       send_mpa_reject(ep, conn_param->private_data,
-                                       conn_param->private_data_len);
-                       abort_connection(ep, NULL, GFP_KERNEL);
-                       err = -ENOMEM;
-                       goto err;
+                       if (RELAXED_IRD_NEGOTIATION) {
+                               ep->ord = ep->ird;
+                       } else {
+                               ep->ird = conn_param->ird;
+                               ep->ord = conn_param->ord;
+                               send_mpa_reject(ep, conn_param->private_data,
+                                               conn_param->private_data_len);
+                               abort_connection(ep, NULL, GFP_KERNEL);
+                               err = -ENOMEM;
+                               goto err;
+                       }
                }
-               if (conn_param->ird > ep->ord) {
-                       if (!ep->ord)
-                               conn_param->ird = 1;
-                       else {
+               if (conn_param->ird < ep->ord) {
+                       if (RELAXED_IRD_NEGOTIATION &&
+                           ep->ord <= h->rdev.lldi.max_ordird_qp) {
+                               conn_param->ird = ep->ord;
+                       } else {
                                abort_connection(ep, NULL, GFP_KERNEL);
                                err = -ENOMEM;
                                goto err;
                        }
                }
-
        }
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
 
-       if (ep->mpa_attr.version != 2)
+       if (ep->mpa_attr.version == 1) {
                if (peer2peer && ep->ird == 0)
                        ep->ird = 1;
+       } else {
+               if (peer2peer &&
+                   (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+                   (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+                       ep->ird = 1;
+       }
 
        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2803,6 +2853,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        return 0;
 err1:
        ep->com.cm_id = NULL;
+       abort_connection(ep, NULL, GFP_KERNEL);
        cm_id->rem_ref(cm_id);
 err:
        mutex_unlock(&ep->com.mutex);
@@ -2886,8 +2937,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        int iptype;
        int iwpm_err = 0;
 
-       if ((conn_param->ord > c4iw_max_read_depth) ||
-           (conn_param->ird > c4iw_max_read_depth)) {
+       if ((conn_param->ord > cur_max_read_depth(dev)) ||
+           (conn_param->ird > cur_max_read_depth(dev))) {
                err = -EINVAL;
                goto out;
        }
@@ -3867,8 +3918,9 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                return 0;
        }
        if (is_neg_adv(req->status)) {
-               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-                    ep->hwtid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Negative advice on abort - tid %u status %d (%s)\n",
+                        ep->hwtid, req->status, neg_adv_str(req->status));
                kfree_skb(skb);
                return 0;
        }
index c04292c950f1750ac5fa2fc35b1ea4fda6a76079..0f773e78e0801ebf2b230298b15a3eb4bde15444 100644 (file)
@@ -633,11 +633,15 @@ proc_cqe:
                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+               if (c4iw_wr_log)
+                       c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
+               if (c4iw_wr_log)
+                       c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }
@@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        /*
         * Make actual HW queue 2x to avoid cdix_inc overflows.
         */
-       hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+       hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
@@ -909,14 +913,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        /*
         * memsize must be a multiple of the page size if its a user cq.
         */
-       if (ucontext) {
+       if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
-               hwentries = memsize / sizeof *chp->cq.queue;
-               while (hwentries > T4_MAX_IQ_SIZE) {
-                       memsize -= PAGE_SIZE;
-                       hwentries = memsize / sizeof *chp->cq.queue;
-               }
-       }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;
index 7db82b24302b63c576b8873565782987f6d33e0e..f25df5276c2214f6b0249eb0788413f0b6e2a212 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/moduleparam.h>
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
+#include <linux/math64.h>
 
 #include <rdma/ib_verbs.h>
 
@@ -55,6 +56,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644);
 MODULE_PARM_DESC(allow_db_coalescing_on_t5,
                 "Allow DB Coalescing on T5 (default = 0)");
 
+int c4iw_wr_log = 0;
+module_param(c4iw_wr_log, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
+
+int c4iw_wr_log_size_order = 12;
+module_param(c4iw_wr_log_size_order, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log_size_order,
+                "Number of entries (log2) in the work request timing log.");
+
 struct uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
@@ -103,6 +113,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
        return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
 }
 
+void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+{
+       struct wr_log_entry le;
+       int idx;
+
+       if (!wq->rdev->wr_log)
+               return;
+
+       idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
+               (wq->rdev->wr_log_size - 1);
+       le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+       getnstimeofday(&le.poll_host_ts);
+       le.valid = 1;
+       le.cqe_sge_ts = CQE_TS(cqe);
+       if (SQ_TYPE(cqe)) {
+               le.qid = wq->sq.qid;
+               le.opcode = CQE_OPCODE(cqe);
+               le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+               le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
+               le.wr_id = CQE_WRID_SQ_IDX(cqe);
+       } else {
+               le.qid = wq->rq.qid;
+               le.opcode = FW_RI_RECEIVE;
+               le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+               le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
+               le.wr_id = CQE_WRID_MSN(cqe);
+       }
+       wq->rdev->wr_log[idx] = le;
+}
+
+static int wr_log_show(struct seq_file *seq, void *v)
+{
+       struct c4iw_dev *dev = seq->private;
+       struct timespec prev_ts = {0, 0};
+       struct wr_log_entry *lep;
+       int prev_ts_set = 0;
+       int idx, end;
+
+#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+
+       idx = atomic_read(&dev->rdev.wr_log_idx) &
+               (dev->rdev.wr_log_size - 1);
+       end = idx - 1;
+       if (end < 0)
+               end = dev->rdev.wr_log_size - 1;
+       lep = &dev->rdev.wr_log[idx];
+       while (idx != end) {
+               if (lep->valid) {
+                       if (!prev_ts_set) {
+                               prev_ts_set = 1;
+                               prev_ts = lep->poll_host_ts;
+                       }
+                       seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
+                                  "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+                                  "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
+                                  "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
+                                  "cqe_poll_delta_ns %llu\n",
+                                  idx,
+                                  timespec_sub(lep->poll_host_ts,
+                                               prev_ts).tv_sec,
+                                  timespec_sub(lep->poll_host_ts,
+                                               prev_ts).tv_nsec,
+                                  lep->qid, lep->opcode,
+                                  lep->opcode == FW_RI_RECEIVE ?
+                                                       "msn" : "wrid",
+                                  lep->wr_id,
+                                  timespec_sub(lep->poll_host_ts,
+                                               lep->post_host_ts).tv_sec,
+                                  timespec_sub(lep->poll_host_ts,
+                                               lep->post_host_ts).tv_nsec,
+                                  lep->post_sge_ts, lep->cqe_sge_ts,
+                                  lep->poll_sge_ts,
+                                  ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
+                                  ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
+                       prev_ts = lep->poll_host_ts;
+               }
+               idx++;
+               if (idx > (dev->rdev.wr_log_size - 1))
+                       idx = 0;
+               lep = &dev->rdev.wr_log[idx];
+       }
+#undef ts2ns
+       return 0;
+}
+
+static int wr_log_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wr_log_show, inode->i_private);
+}
+
+static ssize_t wr_log_clear(struct file *file, const char __user *buf,
+                           size_t count, loff_t *pos)
+{
+       struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+       int i;
+
+       if (dev->rdev.wr_log)
+               for (i = 0; i < dev->rdev.wr_log_size; i++)
+                       dev->rdev.wr_log[i].valid = 0;
+       return count;
+}
+
+static const struct file_operations wr_log_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = wr_log_open,
+       .release = single_release,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .write   = wr_log_clear,
+};
+
 static int dump_qp(int id, void *p, void *data)
 {
        struct c4iw_qp *qp = p;
@@ -241,12 +362,32 @@ static int dump_stag(int id, void *p, void *data)
        struct c4iw_debugfs_data *stagd = data;
        int space;
        int cc;
+       struct fw_ri_tpte tpte;
+       int ret;
 
        space = stagd->bufsize - stagd->pos - 1;
        if (space == 0)
                return 1;
 
-       cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+       ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
+                             (__be32 *)&tpte);
+       if (ret) {
+               dev_err(&stagd->devp->rdev.lldi.pdev->dev,
+                       "%s cxgb4_read_tpte err %d\n", __func__, ret);
+               return ret;
+       }
+       cc = snprintf(stagd->buf + stagd->pos, space,
+                     "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
+                     "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+                     (u32)id<<8,
+                     G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+                     G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+                     ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+                     ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
        if (cc < space)
                stagd->pos += cc;
        return 0;
@@ -259,7 +400,7 @@ static int stag_release(struct inode *inode, struct file *file)
                printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
        }
-       kfree(stagd->buf);
+       vfree(stagd->buf);
        kfree(stagd);
        return 0;
 }
@@ -282,8 +423,8 @@ static int stag_open(struct inode *inode, struct file *file)
        idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
        spin_unlock_irq(&stagd->devp->lock);
 
-       stagd->bufsize = count * sizeof("0x12345678\n");
-       stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+       stagd->bufsize = count * 256;
+       stagd->buf = vmalloc(stagd->bufsize);
        if (!stagd->buf) {
                ret = -ENOMEM;
                goto err1;
@@ -348,6 +489,7 @@ static int stats_show(struct seq_file *seq, void *v)
                   dev->rdev.stats.act_ofld_conn_fails);
        seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.pas_ofld_conn_fails);
+       seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
        return 0;
 }
 
@@ -583,6 +725,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;
 
+       if (c4iw_wr_log) {
+               de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
+                                        (void *)devp, &wr_log_debugfs_fops);
+               if (de && de->d_inode)
+                       de->d_inode->i_size = 4096;
+       }
        return 0;
 }
 
@@ -696,7 +844,20 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                pr_err(MOD "error allocating status page\n");
                goto err4;
        }
+
+       if (c4iw_wr_log) {
+               rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
+                                      sizeof(*rdev->wr_log), GFP_KERNEL);
+               if (rdev->wr_log) {
+                       rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+                       atomic_set(&rdev->wr_log_idx, 0);
+               } else {
+                       pr_err(MOD "error allocating wr_log. Logging disabled\n");
+               }
+       }
+
        rdev->status_page->db_off = 0;
+
        return 0;
 err4:
        c4iw_rqtpool_destroy(rdev);
@@ -710,6 +871,7 @@ err1:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
@@ -768,6 +930,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        }
        devp->rdev.lldi = *infop;
 
+       /* init various hw-queue params based on lld info */
+       PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+            __func__, devp->rdev.lldi.sge_ingpadboundary,
+            devp->rdev.lldi.sge_egrstatuspagesize);
+
+       devp->rdev.hw_queue.t4_eq_status_entries =
+               devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+       devp->rdev.hw_queue.t4_max_eq_size = 65520;
+       devp->rdev.hw_queue.t4_max_iq_size = 65520;
+       devp->rdev.hw_queue.t4_max_rq_size = 8192 -
+               devp->rdev.hw_queue.t4_eq_status_entries - 1;
+       devp->rdev.hw_queue.t4_max_sq_size =
+               devp->rdev.hw_queue.t4_max_eq_size -
+               devp->rdev.hw_queue.t4_eq_status_entries - 1;
+       devp->rdev.hw_queue.t4_max_qp_depth =
+               devp->rdev.hw_queue.t4_max_rq_size;
+       devp->rdev.hw_queue.t4_max_cq_depth =
+               devp->rdev.hw_queue.t4_max_iq_size - 2;
+       devp->rdev.hw_queue.t4_stat_len =
+               devp->rdev.lldi.sge_egrstatuspagesize;
+
        /*
         * For T5 devices, we map all of BAR2 with WC.
         * For T4 devices with onchip qp mem, we map only that part
@@ -818,6 +1001,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);
        INIT_LIST_HEAD(&devp->db_fc_list);
+       devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
index d61d0a18f784c9d1b032be277a87524f89449e41..fbe6051af254bac612de62dd36e1fa66d3150396 100644 (file)
 
 #include "iw_cxgb4.h"
 
+static void print_tpte(struct c4iw_dev *dev, u32 stag)
+{
+       int ret;
+       struct fw_ri_tpte tpte;
+
+       ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
+                             (__be32 *)&tpte);
+       if (ret) {
+               dev_err(&dev->rdev.lldi.pdev->dev,
+                       "%s cxgb4_read_tpte err %d\n", __func__, ret);
+               return;
+       }
+       PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
+              "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+              stag & 0xffffff00,
+              G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+              G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+              ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+              ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+}
+
+static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+       __be64 *p = (void *)err_cqe;
+
+       dev_err(&dev->rdev.lldi.pdev->dev,
+               "AE qpid %d opcode %d status 0x%x "
+               "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
+               CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+               CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
+               CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+       PDBG("%016llx %016llx %016llx %016llx\n",
+            be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+            be64_to_cpu(p[3]));
+
+       /*
+        * Ingress WRITE and READ_RESP errors provide
+        * the offending stag, so parse and log it.
+        */
+       if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
+                                CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
+               print_tpte(dev, CQE_WRID_STAG(err_cqe));
+}
+
 static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
                          struct c4iw_qp *qhp,
                          struct t4_cqe *err_cqe,
@@ -44,11 +93,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        struct c4iw_qp_attributes attrs;
        unsigned long flag;
 
-       printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
-              "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
-              CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
-              CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
-              CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+       dump_err_cqe(dev, err_cqe);
 
        if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
index 361fff7a07427197582348621d82c626629fbabf..b5678ac97393ab94ba73b6b1f396ae801801c480 100644 (file)
@@ -139,6 +139,29 @@ struct c4iw_stats {
        u64  pas_ofld_conn_fails;
 };
 
+struct c4iw_hw_queue {
+       int t4_eq_status_entries;
+       int t4_max_eq_size;
+       int t4_max_iq_size;
+       int t4_max_rq_size;
+       int t4_max_sq_size;
+       int t4_max_qp_depth;
+       int t4_max_cq_depth;
+       int t4_stat_len;
+};
+
+struct wr_log_entry {
+       struct timespec post_host_ts;
+       struct timespec poll_host_ts;
+       u64 post_sge_ts;
+       u64 cqe_sge_ts;
+       u64 poll_sge_ts;
+       u16 qid;
+       u16 wr_id;
+       u8 opcode;
+       u8 valid;
+};
+
 struct c4iw_rdev {
        struct c4iw_resource resource;
        unsigned long qpshift;
@@ -156,7 +179,11 @@ struct c4iw_rdev {
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
+       struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
+       atomic_t wr_log_idx;
+       struct wr_log_entry *wr_log;
+       int wr_log_size;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -166,7 +193,7 @@ static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
 
 static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 {
-       return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+       return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
 #define C4IW_WR_TO (30*HZ)
@@ -237,6 +264,7 @@ struct c4iw_dev {
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
+       u32 avail_ird;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -318,6 +346,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp,
        _remove_handle(rhp, idr, id, 0);
 }
 
+extern uint c4iw_max_read_depth;
+
+static inline int cur_max_read_depth(struct c4iw_dev *dev)
+{
+       return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
+}
+
 struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
@@ -991,7 +1026,8 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
-extern int c4iw_max_read_depth;
+extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
+extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
index b1d305338de6a06477d41ce11eb6f9519b3007c9..72e3b69d1b76c3cf96763883cc2708eebb327d5a 100644 (file)
@@ -318,14 +318,16 @@ static int c4iw_query_device(struct ib_device *ibdev,
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
-       props->max_qp = T4_MAX_NUM_QP;
-       props->max_qp_wr = T4_MAX_QP_DEPTH;
+       props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+       props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
-       props->max_qp_rd_atom = c4iw_max_read_depth;
-       props->max_qp_init_rd_atom = c4iw_max_read_depth;
-       props->max_cq = T4_MAX_NUM_CQ;
-       props->max_cqe = T4_MAX_CQ_DEPTH;
+       props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
+       props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
+                                   c4iw_max_read_depth);
+       props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+       props->max_cq = dev->rdev.lldi.vr->qp.size;
+       props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
index 086f62f5dc9e2ba5978e81f02e8e392c8e201774..c158fcc02bca2b252d8d78b71599697fee361438 100644 (file)
@@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD;
 module_param(max_fr_immd, int, 0644);
 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
 
+static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+{
+       int ret = 0;
+
+       spin_lock_irq(&dev->lock);
+       if (ird <= dev->avail_ird)
+               dev->avail_ird -= ird;
+       else
+               ret = -ENOMEM;
+       spin_unlock_irq(&dev->lock);
+
+       if (ret)
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "device IRD resources exhausted\n");
+
+       return ret;
+}
+
+static void free_ird(struct c4iw_dev *dev, int ird)
+{
+       spin_lock_irq(&dev->lock);
+       dev->avail_ird += ird;
+       spin_unlock_irq(&dev->lock);
+}
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
        unsigned long flag;
@@ -180,9 +205,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        }
 
        /*
-        * RQT must be a power of 2.
+        * RQT must be a power of 2 and at least 16 deep.
         */
-       wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+       wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
        if (!wq->rq.rqt_hwaddr) {
                ret = -ENOMEM;
@@ -258,7 +283,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
-       eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+       eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
+               rdev->hw_queue.t4_eq_status_entries;
 
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
@@ -283,7 +309,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
-       eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+       eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+               rdev->hw_queue.t4_eq_status_entries;
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
@@ -796,6 +823,11 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                  qhp->sq_sig_all;
                swsqe->flushed = 0;
                swsqe->wr_id = wr->wr_id;
+               if (c4iw_wr_log) {
+                       swsqe->sge_ts = cxgb4_read_sge_timestamp(
+                                       qhp->rhp->rdev.lldi.ports[0]);
+                       getnstimeofday(&swsqe->host_ts);
+               }
 
                init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 
@@ -859,6 +891,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                }
 
                qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+               if (c4iw_wr_log) {
+                       qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
+                               cxgb4_read_sge_timestamp(
+                                               qhp->rhp->rdev.lldi.ports[0]);
+                       getnstimeofday(
+                               &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+               }
 
                wqe->recv.opcode = FW_RI_RECV_WR;
                wqe->recv.r1 = 0;
@@ -1202,12 +1241,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
        int ret;
        struct sk_buff *skb;
 
-       PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-            qhp->ep->hwtid);
+       PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+            qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       ret = alloc_ird(rhp, qhp->attr.max_ird);
+       if (ret) {
+               qhp->attr.max_ird = 0;
+               kfree_skb(skb);
+               goto out;
+       }
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1258,10 +1305,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
-               goto out;
+               goto err1;
 
        ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+       if (!ret)
+               goto out;
+err1:
+       free_ird(rhp, qhp->attr.max_ird);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
@@ -1306,7 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & C4IW_QP_ATTR_MAX_IRD) {
-                       if (attrs->max_ird > c4iw_max_read_depth) {
+                       if (attrs->max_ird > cur_max_read_depth(rhp)) {
                                ret = -EINVAL;
                                goto out;
                        }
@@ -1529,6 +1580,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        if (!list_empty(&qhp->db_fc_entry))
                list_del_init(&qhp->db_fc_entry);
        spin_unlock_irq(&rhp->lock);
+       free_ird(rhp, qhp->attr.max_ird);
 
        ucontext = ib_qp->uobject ?
                   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
@@ -1569,13 +1621,17 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
                return ERR_PTR(-EINVAL);
 
-       rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
-       if (rqsize > T4_MAX_RQ_SIZE)
+       if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
                return ERR_PTR(-E2BIG);
+       rqsize = attrs->cap.max_recv_wr + 1;
+       if (rqsize < 8)
+               rqsize = 8;
 
-       sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
-       if (sqsize > T4_MAX_SQ_SIZE)
+       if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
                return ERR_PTR(-E2BIG);
+       sqsize = attrs->cap.max_send_wr + 1;
+       if (sqsize < 8)
+               sqsize = 8;
 
        ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
@@ -1583,19 +1639,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.sq.size = sqsize;
-       qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+       qhp->wq.sq.memsize =
+               (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+               sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
        qhp->wq.sq.flush_cidx = -1;
        qhp->wq.rq.size = rqsize;
-       qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+       qhp->wq.rq.memsize =
+               (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+               sizeof(*qhp->wq.rq.queue);
 
        if (ucontext) {
                qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
                qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
        }
 
-       PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
-            __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
-
        ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
@@ -1619,8 +1676,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
-       qhp->attr.max_ord = 1;
-       qhp->attr.max_ird = 1;
+       qhp->attr.max_ord = 0;
+       qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
        mutex_init(&qhp->mutex);
@@ -1714,9 +1771,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
        INIT_LIST_HEAD(&qhp->db_fc_entry);
-       PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
-            __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
-            qhp->wq.sq.qid);
+       PDBG("%s sq id %u size %u memsize %zu num_entries %u "
+            "rq id %u size %u memsize %zu num_entries %u\n", __func__,
+            qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
+            attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
+            qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
        return &qhp->ibqp;
 err8:
        kfree(mm5);
@@ -1804,5 +1863,11 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        memset(attr, 0, sizeof *attr);
        memset(init_attr, 0, sizeof *init_attr);
        attr->qp_state = to_ib_qp_state(qhp->attr.state);
+       init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
+       init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
+       init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+       init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+       init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
+       init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
index 68b0a6bf4eb00e23f8b737828ff85e465cc24173..df5edfa31a8fe118b950d500fc4bed3c76da5d47 100644 (file)
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP 65536
-#define T4_MAX_NUM_CQ 65536
 #define T4_MAX_NUM_PD 65536
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
-#define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define A_PCIE_MA_SYNC 0x30b4
 
 struct t4_status_page {
@@ -244,8 +233,8 @@ struct t4_cqe {
 #define CQE_WRID_SQ_IDX(x)     ((x)->u.scqe.cidx)
 
 /* generic accessor macros */
-#define CQE_WRID_HI(x)         ((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x)                ((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
 #define S_CQE_GENBIT   63
@@ -277,6 +266,8 @@ struct t4_swsqe {
        int                     signaled;
        u16                     idx;
        int                     flushed;
+       struct timespec         host_ts;
+       u64                     sge_ts;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -314,6 +305,8 @@ struct t4_sq {
 
 struct t4_swrqe {
        u64 wr_id;
+       struct timespec host_ts;
+       u64 sge_ts;
 };
 
 struct t4_rq {
index 91289a051af928c7ffb04bfb7ac2ee8c342be7fe..5709e77faf7cb9d94a5d4e60f7f9af3d8b6efddb 100644 (file)
@@ -849,6 +849,5 @@ enum {                     /* TCP congestion control algorithms */
 #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
 
 #define CONG_CNTRL_VALID   (1 << 18)
-#define T5_OPT_2_VALID       (1 << 31)
 
 #endif /* _T4FW_RI_API_H_ */
index 5786a78ff8bc97811612818ab08f1fd8c3a01132..4e675f4fecc974447fde0171e9dfa94403d0f176 100644 (file)
@@ -1394,8 +1394,8 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
 {
        struct net_device *dev;
 
-       dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
-                          ipoib_setup);
+       dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
+                          NET_NAME_UNKNOWN, ipoib_setup);
        if (!dev)
                return NULL;
 
index d9aebbc510cc77d4116fa17c5349034f329e2362..c2ed6246a389c63719fce2e5a66b1b5fb303f82f 100644 (file)
@@ -2588,7 +2588,8 @@ isdn_net_new(char *name, struct net_device *master)
                printk(KERN_WARNING "isdn_net: Could not allocate net-device\n");
                return NULL;
        }
-       netdev->dev = alloc_netdev(sizeof(isdn_net_local), name, _isdn_setup);
+       netdev->dev = alloc_netdev(sizeof(isdn_net_local), name,
+                                  NET_NAME_UNKNOWN, _isdn_setup);
        if (!netdev->dev) {
                printk(KERN_WARNING "isdn_net: Could not allocate network device\n");
                kfree(netdev);
index 8a86b3025637d69c33a91ed7c697f5ad261fedb1..059e6117f22b5181d59229f0bd0af202466f7817 100644 (file)
@@ -1276,7 +1276,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
        if ((if_num = get_if(dvbnet)) < 0)
                return -EINVAL;
 
-       net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb", dvb_net_setup);
+       net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
+                          NET_NAME_UNKNOWN, dvb_net_setup);
        if (!net)
                return -ENOMEM;
 
index 3fac67a5204cfa768717a00b6628079fd37f939d..557f9782c53c59f4e741204cebcf1a1bf2023109 100644 (file)
@@ -544,7 +544,8 @@ xpnet_init(void)
         * use ether_setup() to init the majority of our device
         * structure and then override the necessary pieces.
         */
-       xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
+       xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, NET_NAME_UNKNOWN,
+                                   ether_setup);
        if (xpnet_device == NULL) {
                kfree(xpnet_broadcast_partitions);
                return -ENOMEM;
index 89402c3b64f8406ebc0e81a7d79fb341fa57df3d..c6f6f69f8961a58f40771d3b80d200d8fd730967 100644 (file)
@@ -148,6 +148,7 @@ config VXLAN
        tristate "Virtual eXtensible Local Area Network (VXLAN)"
        depends on INET
        select NET_IP_TUNNEL
+       select NET_UDP_TUNNEL
        ---help---
          This allows one to create vxlan virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. VXLAN is often used
index a956053608f9f6a9fdb94c877917150e6a25680e..3b790de6c97652791dbfadf0300d4f141c840d6d 100644 (file)
@@ -346,7 +346,8 @@ struct net_device *alloc_arcdev(const char *name)
        struct net_device *dev;
 
        dev = alloc_netdev(sizeof(struct arcnet_local),
-                          name && *name ? name : "arc%d", arcdev_setup);
+                          name && *name ? name : "arc%d", NET_NAME_UNKNOWN,
+                          arcdev_setup);
        if(dev) {
                struct arcnet_local *lp = netdev_priv(dev);
                spin_lock_init(&lp->lock);
index 0dfeaf5da3f2c914e2fd43e40d4bf4f3d8aff079..ee2c73a9de397da448bd3ac617b04bf1a6ae7749 100644 (file)
@@ -20,8 +20,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/skbuff.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -301,8 +299,8 @@ static u16 __get_link_speed(struct port *port)
                }
        }
 
-       pr_debug("Port %d Received link speed %d update from adapter\n",
-                port->actor_port_number, speed);
+       netdev_dbg(slave->bond->dev, "Port %d Received link speed %d update from adapter\n",
+                  port->actor_port_number, speed);
        return speed;
 }
 
@@ -329,14 +327,14 @@ static u8 __get_duplex(struct port *port)
                switch (slave->duplex) {
                case DUPLEX_FULL:
                        retval = 0x1;
-                       pr_debug("Port %d Received status full duplex update from adapter\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Port %d Received status full duplex update from adapter\n",
+                                  port->actor_port_number);
                        break;
                case DUPLEX_HALF:
                default:
                        retval = 0x0;
-                       pr_debug("Port %d Received status NOT full duplex update from adapter\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Port %d Received status NOT full duplex update from adapter\n",
+                                  port->actor_port_number);
                        break;
                }
        }
@@ -1079,9 +1077,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        /* detect loopback situation */
                        if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
                                              &(port->actor_system))) {
-                               pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+                               netdev_err(port->slave->bond->dev, "An illegal loopback occurred on adapter (%s)\n"
                                       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
-                                      port->slave->bond->dev->name,
                                       port->slave->dev->name);
                                return;
                        }
@@ -1269,9 +1266,9 @@ static void ad_port_selection_logic(struct port *port)
                                port->next_port_in_aggregator = NULL;
                                port->actor_port_aggregator_identifier = 0;
 
-                               pr_debug("Port %d left LAG %d\n",
-                                        port->actor_port_number,
-                                        temp_aggregator->aggregator_identifier);
+                               netdev_dbg(bond->dev, "Port %d left LAG %d\n",
+                                          port->actor_port_number,
+                                          temp_aggregator->aggregator_identifier);
                                /* if the aggregator is empty, clear its
                                 * parameters, and set it ready to be attached
                                 */
@@ -1284,11 +1281,11 @@ static void ad_port_selection_logic(struct port *port)
                        /* meaning: the port was related to an aggregator
                         * but was not on the aggregator port list
                         */
-                       pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
-                                           port->slave->bond->dev->name,
-                                           port->actor_port_number,
-                                           port->slave->dev->name,
-                                           port->aggregator->aggregator_identifier);
+                       net_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+                                            port->slave->bond->dev->name,
+                                            port->actor_port_number,
+                                            port->slave->dev->name,
+                                            port->aggregator->aggregator_identifier);
                }
        }
        /* search on all aggregators for a suitable aggregator for this port */
@@ -1318,9 +1315,9 @@ static void ad_port_selection_logic(struct port *port)
                        port->next_port_in_aggregator = aggregator->lag_ports;
                        port->aggregator->num_of_ports++;
                        aggregator->lag_ports = port;
-                       pr_debug("Port %d joined LAG %d(existing LAG)\n",
-                                port->actor_port_number,
-                                port->aggregator->aggregator_identifier);
+                       netdev_dbg(bond->dev, "Port %d joined LAG %d(existing LAG)\n",
+                                  port->actor_port_number,
+                                  port->aggregator->aggregator_identifier);
 
                        /* mark this port as selected */
                        port->sm_vars |= AD_PORT_SELECTED;
@@ -1363,12 +1360,11 @@ static void ad_port_selection_logic(struct port *port)
                        /* mark this port as selected */
                        port->sm_vars |= AD_PORT_SELECTED;
 
-                       pr_debug("Port %d joined LAG %d(new LAG)\n",
-                                port->actor_port_number,
-                                port->aggregator->aggregator_identifier);
+                       netdev_dbg(bond->dev, "Port %d joined LAG %d(new LAG)\n",
+                                  port->actor_port_number,
+                                  port->aggregator->aggregator_identifier);
                } else {
-                       pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
-                              port->slave->bond->dev->name,
+                       netdev_err(bond->dev, "Port %d (on %s) did not find a suitable aggregator\n",
                               port->actor_port_number, port->slave->dev->name);
                }
        }
@@ -1445,9 +1441,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
                break;
 
        default:
-               pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
-                                   curr->slave->bond->dev->name,
-                                   __get_agg_selection_mode(curr->lag_ports));
+               net_warn_ratelimited("%s: Impossible agg select mode %d\n",
+                                    curr->slave->bond->dev->name,
+                                    __get_agg_selection_mode(curr->lag_ports));
                break;
        }
 
@@ -1539,40 +1535,40 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 
        /* if there is new best aggregator, activate it */
        if (best) {
-               pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                        best->aggregator_identifier, best->num_of_ports,
-                        best->actor_oper_aggregator_key,
-                        best->partner_oper_aggregator_key,
-                        best->is_individual, best->is_active);
-               pr_debug("best ports %p slave %p %s\n",
-                        best->lag_ports, best->slave,
-                        best->slave ? best->slave->dev->name : "NULL");
+               netdev_dbg(bond->dev, "best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                          best->aggregator_identifier, best->num_of_ports,
+                          best->actor_oper_aggregator_key,
+                          best->partner_oper_aggregator_key,
+                          best->is_individual, best->is_active);
+               netdev_dbg(bond->dev, "best ports %p slave %p %s\n",
+                          best->lag_ports, best->slave,
+                          best->slave ? best->slave->dev->name : "NULL");
 
                bond_for_each_slave_rcu(bond, slave, iter) {
                        agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
-                       pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                                agg->aggregator_identifier, agg->num_of_ports,
-                                agg->actor_oper_aggregator_key,
-                                agg->partner_oper_aggregator_key,
-                                agg->is_individual, agg->is_active);
+                       netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                                  agg->aggregator_identifier, agg->num_of_ports,
+                                  agg->actor_oper_aggregator_key,
+                                  agg->partner_oper_aggregator_key,
+                                  agg->is_individual, agg->is_active);
                }
 
                /* check if any partner replys */
                if (best->is_individual) {
-                       pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
-                                           best->slave ?
-                                           best->slave->bond->dev->name : "NULL");
+                       net_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+                                            best->slave ?
+                                            best->slave->bond->dev->name : "NULL");
                }
 
                best->is_active = 1;
-               pr_debug("LAG %d chosen as the active LAG\n",
-                        best->aggregator_identifier);
-               pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                        best->aggregator_identifier, best->num_of_ports,
-                        best->actor_oper_aggregator_key,
-                        best->partner_oper_aggregator_key,
-                        best->is_individual, best->is_active);
+               netdev_dbg(bond->dev, "LAG %d chosen as the active LAG\n",
+                          best->aggregator_identifier);
+               netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                          best->aggregator_identifier, best->num_of_ports,
+                          best->actor_oper_aggregator_key,
+                          best->partner_oper_aggregator_key,
+                          best->is_individual, best->is_active);
 
                /* disable the ports that were related to the former
                 * active_aggregator
@@ -1908,13 +1904,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: Trying to unbind an uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
-       pr_debug("Unbinding Link Aggregation Group %d\n",
-                aggregator->aggregator_identifier);
+       netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n",
+                  aggregator->aggregator_identifier);
 
        /* Tell the partner that this port is not suitable for aggregation */
        port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1949,14 +1945,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
                         * new aggregator
                         */
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
-                               pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
-                                        aggregator->aggregator_identifier,
-                                        new_aggregator->aggregator_identifier);
+                               netdev_dbg(bond->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n",
+                                          aggregator->aggregator_identifier,
+                                          new_aggregator->aggregator_identifier);
 
                                if ((new_aggregator->lag_ports == port) &&
                                    new_aggregator->is_active) {
-                                       pr_info("%s: Removing an active aggregator\n",
-                                               aggregator->slave->bond->dev->name);
+                                       netdev_info(bond->dev, "Removing an active aggregator\n");
                                         select_new_active_agg = 1;
                                }
 
@@ -1986,8 +1981,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                if (select_new_active_agg)
                                        ad_agg_selection_logic(__get_first_agg(port));
                        } else {
-                               pr_warn("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
-                                       slave->bond->dev->name);
+                               netdev_warn(bond->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n");
                        }
                } else {
                        /* in case that the only port related to this
@@ -1996,8 +1990,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                        select_new_active_agg = aggregator->is_active;
                        ad_clear_agg(aggregator);
                        if (select_new_active_agg) {
-                               pr_info("%s: Removing an active aggregator\n",
-                                       slave->bond->dev->name);
+                               netdev_info(bond->dev, "Removing an active aggregator\n");
                                /* select new active aggregator */
                                temp_aggregator = __get_first_agg(port);
                                if (temp_aggregator)
@@ -2006,7 +1999,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                }
        }
 
-       pr_debug("Unbinding port %d\n", port->actor_port_number);
+       netdev_dbg(bond->dev, "Unbinding port %d\n", port->actor_port_number);
 
        /* find the aggregator that this port is connected to */
        bond_for_each_slave(bond, slave_iter, iter) {
@@ -2029,8 +2022,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                        select_new_active_agg = temp_aggregator->is_active;
                                        ad_clear_agg(temp_aggregator);
                                        if (select_new_active_agg) {
-                                               pr_info("%s: Removing an active aggregator\n",
-                                                       slave->bond->dev->name);
+                                               netdev_info(bond->dev, "Removing an active aggregator\n");
                                                /* select new active aggregator */
                                                ad_agg_selection_logic(__get_first_agg(port));
                                        }
@@ -2081,8 +2073,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                /* select the active aggregator for the bond */
                if (port) {
                        if (!port->slave) {
-                               pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
-                                                   bond->dev->name);
+                               net_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
+                                                    bond->dev->name);
                                goto re_arm;
                        }
 
@@ -2096,7 +2088,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        bond_for_each_slave_rcu(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave)->port);
                if (!port->slave) {
-                       pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
+                       net_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
                                            bond->dev->name);
                        goto re_arm;
                }
@@ -2158,16 +2150,16 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
                port = &(SLAVE_AD_INFO(slave)->port);
 
                if (!port->slave) {
-                       pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
-                                           slave->dev->name, slave->bond->dev->name);
+                       net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+                                            slave->dev->name, slave->bond->dev->name);
                        return ret;
                }
 
                switch (lacpdu->subtype) {
                case AD_TYPE_LACPDU:
                        ret = RX_HANDLER_CONSUMED;
-                       pr_debug("Received LACPDU on port %d\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
+                                  port->actor_port_number);
                        /* Protect against concurrent state machines */
                        __get_state_machine_lock(port);
                        ad_rx_machine(lacpdu, port);
@@ -2182,20 +2174,20 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
                        switch (((struct bond_marker *)lacpdu)->tlv_type) {
                        case AD_MARKER_INFORMATION_SUBTYPE:
-                               pr_debug("Received Marker Information on port %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
+                                          port->actor_port_number);
                                ad_marker_info_received((struct bond_marker *)lacpdu, port);
                                break;
 
                        case AD_MARKER_RESPONSE_SUBTYPE:
-                               pr_debug("Received Marker Response on port %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
+                                          port->actor_port_number);
                                ad_marker_response_received((struct bond_marker *)lacpdu, port);
                                break;
 
                        default:
-                               pr_debug("Received an unknown Marker subtype on slot %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
+                                          port->actor_port_number);
                        }
                }
        }
@@ -2216,8 +2208,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: speed changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "speed changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2226,7 +2218,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
        port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                (__get_link_speed(port) << 1);
-       pr_debug("Port %d changed speed\n", port->actor_port_number);
+       netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2249,8 +2241,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("%s: Warning: duplex changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "duplex changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2259,7 +2251,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                __get_duplex(port);
-       pr_debug("Port %d changed duplex\n", port->actor_port_number);
+       netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2283,8 +2275,8 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: link status changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "link status changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2311,9 +2303,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_BITS);
        }
-       pr_debug("Port %d changed link status to %s\n",
-                port->actor_port_number,
-                link == BOND_LINK_UP ? "UP" : "DOWN");
+       netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
+                  port->actor_port_number,
+                  link == BOND_LINK_UP ? "UP" : "DOWN");
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2427,8 +2419,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        int agg_id;
 
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
-               pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
-                        dev->name);
+               netdev_dbg(dev, "__bond_3ad_get_active_agg_info failed\n");
                goto err_free;
        }
 
@@ -2436,7 +2427,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        agg_id = ad_info.aggregator_id;
 
        if (slaves_in_agg == 0) {
-               pr_debug("%s: Error: active aggregator is empty\n", dev->name);
+               netdev_dbg(dev, "active aggregator is empty\n");
                goto err_free;
        }
 
@@ -2462,8 +2453,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (slave_agg_no >= 0) {
-               pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
-                      dev->name, agg_id);
+               netdev_err(dev, "Couldn't find a slave to tx on for aggregator ID %d\n",
+                          agg_id);
                goto err_free;
        }
 
index 76c0dade233f904324631dba34be0f370d193179..95dd1f58c260d941c51ac4a369b2b97ba3f4ba34 100644 (file)
@@ -19,8 +19,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -202,6 +200,7 @@ static int tlb_initialize(struct bonding *bond)
 static void tlb_deinitialize(struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct tlb_up_slave *arr;
 
        _lock_tx_hashtbl_bh(bond);
 
@@ -209,6 +208,10 @@ static void tlb_deinitialize(struct bonding *bond)
        bond_info->tx_hashtbl = NULL;
 
        _unlock_tx_hashtbl_bh(bond);
+
+       arr = rtnl_dereference(bond_info->slave_arr);
+       if (arr)
+               kfree_rcu(arr, rcu);
 }
 
 static long long compute_gap(struct slave *slave)
@@ -369,7 +372,7 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
        if (arp->op_code == htons(ARPOP_REPLY)) {
                /* update rx hash table for this ARP */
                rlb_update_entry_from_arp(bond, arp);
-               pr_debug("Server received an ARP Reply from client\n");
+               netdev_dbg(bond->dev, "Server received an ARP Reply from client\n");
        }
 out:
        return RX_HANDLER_ANOTHER;
@@ -448,11 +451,13 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
  */
 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 {
-       if (!bond->curr_active_slave)
+       struct slave *curr_active = bond_deref_active_protected(bond);
+
+       if (!curr_active)
                return;
 
        if (!bond->alb_info.primary_is_promisc) {
-               if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
+               if (!dev_set_promiscuity(curr_active->dev, 1))
                        bond->alb_info.primary_is_promisc = 1;
                else
                        bond->alb_info.primary_is_promisc = 0;
@@ -460,7 +465,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr, true);
+       alb_send_learning_packets(curr_active, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -509,7 +514,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 
        write_lock_bh(&bond->curr_slave_lock);
 
-       if (slave != bond->curr_active_slave)
+       if (slave != bond_deref_active_protected(bond))
                rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
 
        write_unlock_bh(&bond->curr_slave_lock);
@@ -533,8 +538,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
                                 client_info->slave->dev->dev_addr,
                                 client_info->mac_dst);
                if (!skb) {
-                       pr_err("%s: Error: failed to create an ARP packet\n",
-                              client_info->slave->bond->dev->name);
+                       netdev_err(client_info->slave->bond->dev,
+                                  "failed to create an ARP packet\n");
                        continue;
                }
 
@@ -543,8 +548,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
                if (client_info->vlan_id) {
                        skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
                        if (!skb) {
-                               pr_err("%s: Error: failed to insert VLAN tag\n",
-                                      client_info->slave->bond->dev->name);
+                               netdev_err(client_info->slave->bond->dev,
+                                          "failed to insert VLAN tag\n");
                                continue;
                        }
                }
@@ -628,8 +633,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
                client_info = &(bond_info->rx_hashtbl[hash_index]);
 
                if (!client_info->slave) {
-                       pr_err("%s: Error: found a client with no channel in the client's hash table\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
                        continue;
                }
                /*update all clients using this src_ip, that are not assigned
@@ -684,7 +688,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                         * move the old client to primary (curr_active_slave) so
                         * that the new client can be assigned to this entry.
                         */
-                       if (bond->curr_active_slave &&
+                       if (curr_active_slave &&
                            client_info->slave != curr_active_slave) {
                                client_info->slave = curr_active_slave;
                                rlb_update_client(client_info);
@@ -765,7 +769,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                tx_slave = rlb_choose_channel(skb, bond);
                if (tx_slave)
                        ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
-               pr_debug("Server sent ARP Reply packet\n");
+               netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
        } else if (arp->op_code == htons(ARPOP_REQUEST)) {
                /* Create an entry in the rx_hashtbl for this client as a
                 * place holder.
@@ -785,7 +789,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 * updated with their assigned mac.
                 */
                rlb_req_update_subnet_clients(bond, arp->ip_src);
-               pr_debug("Server sent ARP Request packet\n");
+               netdev_dbg(bond->dev, "Server sent ARP Request packet\n");
        }
 
        return tx_slave;
@@ -1024,8 +1028,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        if (vid) {
                skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
-                       pr_err("%s: Error: failed to insert VLAN tag\n",
-                              slave->bond->dev->name);
+                       netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
                        return;
                }
        }
@@ -1039,7 +1042,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
-       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
+       struct bond_vlan_tag *tags;
 
        /* send untagged */
        alb_send_lp_vid(slave, mac_addr, 0, 0);
@@ -1067,10 +1070,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
                 * when strict_match is turned off.
                 */
                if (netif_is_macvlan(upper) && !strict_match) {
-                       memset(tags, 0, sizeof(tags));
-                       bond_verify_device_path(bond->dev, upper, tags);
+                       tags = bond_verify_device_path(bond->dev, upper, 0);
+                       if (IS_ERR_OR_NULL(tags))
+                               continue; /* alloc failure: skip, don't panic */
                        alb_send_lp_vid(slave, upper->dev_addr,
                                        tags[0].vlan_proto, tags[0].vlan_id);
+                       kfree(tags);
                }
        }
        rcu_read_unlock();
@@ -1091,9 +1096,8 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
        memcpy(s_addr.sa_data, addr, dev->addr_len);
        s_addr.sa_family = dev->type;
        if (dev_set_mac_address(dev, &s_addr)) {
-               pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
-                      "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
-                      slave->bond->dev->name, dev->name);
+               netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
+                          dev->name);
                return -EOPNOTSUPP;
        }
        return 0;
@@ -1221,7 +1225,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *has_bond_addr = bond->curr_active_slave;
+       struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
        struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct list_head *iter;
 
@@ -1267,13 +1271,12 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        if (free_mac_slave) {
                alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
-               pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
-                       bond->dev->name, slave->dev->name,
-                       free_mac_slave->dev->name);
+               netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+                           slave->dev->name, free_mac_slave->dev->name);
 
        } else if (has_bond_addr) {
-               pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
-                      bond->dev->name, slave->dev->name);
+               netdev_err(bond->dev, "the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
+                          slave->dev->name);
                return -EFAULT;
        }
 
@@ -1406,9 +1409,39 @@ out:
        return NETDEV_TX_OK;
 }
 
+static int bond_tlb_update_slave_arr(struct bonding *bond,
+                                    struct slave *skipslave)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct slave *tx_slave;
+       struct list_head *iter;
+       struct tlb_up_slave *new_arr, *old_arr;
+
+       new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
+                         GFP_ATOMIC);
+       if (!new_arr)
+               return -ENOMEM;
+
+       bond_for_each_slave(bond, tx_slave, iter) {
+               if (!bond_slave_can_tx(tx_slave))
+                       continue;
+               if (skipslave == tx_slave)
+                       continue;
+               new_arr->arr[new_arr->count++] = tx_slave;
+       }
+
+       old_arr = rtnl_dereference(bond_info->slave_arr);
+       rcu_assign_pointer(bond_info->slave_arr, new_arr);
+       if (old_arr)
+               kfree_rcu(old_arr, rcu);
+
+       return 0;
+}
+
 int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
        struct ethhdr *eth_data;
        struct slave *tx_slave = NULL;
        u32 hash_index;
@@ -1429,12 +1462,12 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                                                              hash_index & 0xFF,
                                                              skb->len);
                        } else {
-                               struct list_head *iter;
-                               int idx = hash_index % bond->slave_cnt;
+                               struct tlb_up_slave *slaves;
 
-                               bond_for_each_slave_rcu(bond, tx_slave, iter)
-                                       if (--idx < 0)
-                                               break;
+                               slaves = rcu_dereference(bond_info->slave_arr);
+                               if (slaves && slaves->count)
+                                       tx_slave = slaves->arr[hash_index %
+                                                              slaves->count];
                        }
                        break;
                }
@@ -1575,7 +1608,7 @@ void bond_alb_monitor(struct work_struct *work)
                         * use mac of the slave device.
                         * In RLB mode, we always use strict matches.
                         */
-                       strict_match = (slave != bond->curr_active_slave ||
+                       strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
                                        bond_info->rlb_enabled);
                        alb_send_learning_packets(slave, slave->dev->dev_addr,
                                                  strict_match);
@@ -1593,7 +1626,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                bond_for_each_slave_rcu(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
-                       if (slave == bond->curr_active_slave) {
+                       if (slave == rcu_access_pointer(bond->curr_active_slave)) {
                                SLAVE_TLB_INFO(slave).load =
                                        bond_info->unbalanced_load /
                                                BOND_TLB_REBALANCE_INTERVAL;
@@ -1625,7 +1658,8 @@ void bond_alb_monitor(struct work_struct *work)
                         * because a slave was disabled then
                         * it can now leave promiscuous mode.
                         */
-                       dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+                       dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
+                                           -1);
                        bond_info->primary_is_promisc = 0;
 
                        rtnl_unlock();
@@ -1698,6 +1732,11 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
                bond->alb_info.rx_slave = NULL;
                rlb_clear_slave(bond, slave);
        }
+
+       if (bond_is_nondyn_tlb(bond))
+               if (bond_tlb_update_slave_arr(bond, slave))
+                       pr_err("Failed to build slave-array for TLB mode.\n");
+
 }
 
 /* Caller must hold bond lock for read */
@@ -1721,6 +1760,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
                         */
                }
        }
+
+       if (bond_is_nondyn_tlb(bond)) {
+               if (bond_tlb_update_slave_arr(bond, NULL))
+                       pr_err("Failed to build slave-array for TLB mode.\n");
+       }
 }
 
 /**
@@ -1742,17 +1786,21 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        __acquires(&bond->curr_slave_lock)
 {
        struct slave *swap_slave;
+       struct slave *curr_active;
 
-       if (bond->curr_active_slave == new_slave)
+       curr_active = rcu_dereference_protected(bond->curr_active_slave,
+                                               !new_slave ||
+                                               lockdep_is_held(&bond->curr_slave_lock));
+       if (curr_active == new_slave)
                return;
 
-       if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
-               dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+       if (curr_active && bond->alb_info.primary_is_promisc) {
+               dev_set_promiscuity(curr_active->dev, -1);
                bond->alb_info.primary_is_promisc = 0;
                bond->alb_info.rlb_promisc_timeout_counter = 0;
        }
 
-       swap_slave = bond->curr_active_slave;
+       swap_slave = curr_active;
        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
        if (!new_slave || !bond_has_slaves(bond))
@@ -1818,6 +1866,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct sockaddr *sa = addr;
+       struct slave *curr_active;
        struct slave *swap_slave;
        int res;
 
@@ -1834,23 +1883,24 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
         * Otherwise we'll need to pass the new address to it and handle
         * duplications.
         */
-       if (!bond->curr_active_slave)
+       curr_active = rtnl_dereference(bond->curr_active_slave);
+       if (!curr_active)
                return 0;
 
        swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
 
        if (swap_slave) {
-               alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
-               alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+               alb_swap_mac_addr(swap_slave, curr_active);
+               alb_fasten_mac_swap(bond, swap_slave, curr_active);
        } else {
-               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave,
+               alb_send_learning_packets(curr_active,
                                          bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
-                       rlb_req_update_slave_clients(bond, bond->curr_active_slave);
+                       rlb_req_update_slave_clients(bond, curr_active);
                }
                read_unlock(&bond->lock);
        }
index 5fc76c01636cb6eb0e9e96d14fc0c79566741900..aaeac61d03cf804524ac602a8f5870a30aee0b73 100644 (file)
@@ -139,12 +139,20 @@ struct tlb_slave_info {
                         */
 };
 
+struct tlb_up_slave {
+       unsigned int    count;
+       struct rcu_head rcu;
+       struct slave    *arr[0];
+};
+
 struct alb_bond_info {
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        spinlock_t              tx_hashtbl_lock;
        u32                     unbalanced_load;
        int                     tx_rebalance_counter;
        int                     lp_counter;
+       /* -------- non-dynamic tlb mode only ---------*/
+       struct tlb_up_slave __rcu *slave_arr;     /* Up slaves */
        /* -------- rlb parameters -------- */
        int rlb_enabled;
        struct rlb_client_info  *rx_hashtbl;    /* Receive hash table */
index 658e761c4568dff39ef18db8c548f8ed349c8ee0..280971b227ea461788797d35b98145699eee52e4 100644 (file)
@@ -69,8 +69,7 @@ void bond_debug_register(struct bonding *bond)
                debugfs_create_dir(bond->dev->name, bonding_debug_root);
 
        if (!bond->debug_dir) {
-               pr_warn("%s: Warning: failed to register to debugfs\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "failed to register to debugfs\n");
                return;
        }
 
@@ -98,8 +97,7 @@ void bond_debug_reregister(struct bonding *bond)
        if (d) {
                bond->debug_dir = d;
        } else {
-               pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
                bond_debug_unregister(bond);
        }
 }
index 701f86cd5993246633b9be643f0801ba6c71b79d..023ec365209c06543d043791194270a95c2c7c35 100644 (file)
@@ -31,8 +31,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -498,11 +496,10 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        int err = 0;
 
        if (bond_uses_primary(bond)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave) {
-                       err = dev_set_promiscuity(bond->curr_active_slave->dev,
-                                                 inc);
-               }
+               struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
+
+               if (curr_active)
+                       err = dev_set_promiscuity(curr_active->dev, inc);
        } else {
                struct slave *slave;
 
@@ -524,11 +521,10 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        int err = 0;
 
        if (bond_uses_primary(bond)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave) {
-                       err = dev_set_allmulti(bond->curr_active_slave->dev,
-                                              inc);
-               }
+               struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
+
+               if (curr_active)
+                       err = dev_set_allmulti(curr_active->dev, inc);
        } else {
                struct slave *slave;
 
@@ -629,8 +625,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 static void bond_set_dev_addr(struct net_device *bond_dev,
                              struct net_device *slave_dev)
 {
-       pr_debug("bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
-                bond_dev, slave_dev, slave_dev->addr_len);
+       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
+                  bond_dev, slave_dev, slave_dev->addr_len);
        memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
        bond_dev->addr_assign_type = NET_ADDR_STOLEN;
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -684,8 +680,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 
                rv = dev_set_mac_address(new_active->dev, &saddr);
                if (rv) {
-                       pr_err("%s: Error %d setting MAC of slave %s\n",
-                              bond->dev->name, -rv, new_active->dev->name);
+                       netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+                                  -rv, new_active->dev->name);
                        goto out;
                }
 
@@ -697,14 +693,14 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 
                rv = dev_set_mac_address(old_active->dev, &saddr);
                if (rv)
-                       pr_err("%s: Error %d setting MAC of slave %s\n",
-                              bond->dev->name, -rv, new_active->dev->name);
+                       netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+                                  -rv, new_active->dev->name);
 out:
                write_lock_bh(&bond->curr_slave_lock);
                break;
        default:
-               pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
-                      bond->dev->name, bond->params.fail_over_mac);
+               netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
+                          bond->params.fail_over_mac);
                break;
        }
 
@@ -713,7 +709,7 @@ out:
 static bool bond_should_change_active(struct bonding *bond)
 {
        struct slave *prim = bond->primary_slave;
-       struct slave *curr = bond->curr_active_slave;
+       struct slave *curr = bond_deref_active_protected(bond);
 
        if (!prim || !curr || curr->link != BOND_LINK_UP)
                return true;
@@ -765,8 +761,8 @@ static bool bond_should_notify_peers(struct bonding *bond)
        slave = rcu_dereference(bond->curr_active_slave);
        rcu_read_unlock();
 
-       pr_debug("bond_should_notify_peers: bond %s slave %s\n",
-                bond->dev->name, slave ? slave->dev->name : "NULL");
+       netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
+                  slave ? slave->dev->name : "NULL");
 
        if (!slave || !bond->send_peer_notif ||
            test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
@@ -792,7 +788,11 @@ static bool bond_should_notify_peers(struct bonding *bond)
  */
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
-       struct slave *old_active = bond->curr_active_slave;
+       struct slave *old_active;
+
+       old_active = rcu_dereference_protected(bond->curr_active_slave,
+                                              !new_active ||
+                                              lockdep_is_held(&bond->curr_slave_lock));
 
        if (old_active == new_active)
                return;
@@ -802,9 +802,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 
                if (new_active->link == BOND_LINK_BACK) {
                        if (bond_uses_primary(bond)) {
-                               pr_info("%s: making interface %s the new active one %d ms earlier\n",
-                                       bond->dev->name, new_active->dev->name,
-                                       (bond->params.updelay - new_active->delay) * bond->params.miimon);
+                               netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
+                                           new_active->dev->name,
+                                           (bond->params.updelay - new_active->delay) * bond->params.miimon);
                        }
 
                        new_active->delay = 0;
@@ -817,8 +817,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
                        if (bond_uses_primary(bond)) {
-                               pr_info("%s: making interface %s the new active one\n",
-                                       bond->dev->name, new_active->dev->name);
+                               netdev_info(bond->dev, "making interface %s the new active one\n",
+                                           new_active->dev->name);
                        }
                }
        }
@@ -900,18 +900,16 @@ void bond_select_active_slave(struct bonding *bond)
        int rv;
 
        best_slave = bond_find_best_slave(bond);
-       if (best_slave != bond->curr_active_slave) {
+       if (best_slave != bond_deref_active_protected(bond)) {
                bond_change_active_slave(bond, best_slave);
                rv = bond_set_carrier(bond);
                if (!rv)
                        return;
 
                if (netif_carrier_ok(bond->dev)) {
-                       pr_info("%s: first active interface up!\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "first active interface up!\n");
                } else {
-                       pr_info("%s: now running without any active interface!\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "now running without any active interface!\n");
                }
        }
 }
@@ -1001,12 +999,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        netdev_features_t mask;
        struct slave *slave;
 
-       if (!bond_has_slaves(bond)) {
-               /* Disable adding VLANs to empty bond. But why? --mq */
-               features |= NETIF_F_VLAN_CHALLENGED;
-               return features;
-       }
-
        mask = features;
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
@@ -1214,36 +1206,38 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        if (!bond->params.use_carrier &&
            slave_dev->ethtool_ops->get_link == NULL &&
            slave_ops->ndo_do_ioctl == NULL) {
-               pr_warn("%s: Warning: no link monitoring support for %s\n",
-                       bond_dev->name, slave_dev->name);
+               netdev_warn(bond_dev, "no link monitoring support for %s\n",
+                           slave_dev->name);
        }
 
        /* already enslaved */
        if (slave_dev->flags & IFF_SLAVE) {
-               pr_debug("Error: Device was already enslaved\n");
+               netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
                return -EBUSY;
        }
 
        if (bond_dev == slave_dev) {
-               pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+               netdev_err(bond_dev, "cannot enslave bond to itself.\n");
                return -EPERM;
        }
 
        /* vlan challenged mutual exclusion */
        /* no need to lock since we're protected by rtnl_lock */
        if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
-               pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+               netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
+                          slave_dev->name);
                if (vlan_uses_dev(bond_dev)) {
-                       pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
-                              bond_dev->name, slave_dev->name, bond_dev->name);
+                       netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
+                                  slave_dev->name, bond_dev->name);
                        return -EPERM;
                } else {
-                       pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
-                               bond_dev->name, slave_dev->name,
-                               slave_dev->name, bond_dev->name);
+                       netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+                                   slave_dev->name, slave_dev->name,
+                                   bond_dev->name);
                }
        } else {
-               pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+               netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
+                          slave_dev->name);
        }
 
        /*
@@ -1253,8 +1247,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * enslaving it; the old ifenslave will not.
         */
        if ((slave_dev->flags & IFF_UP)) {
-               pr_err("%s is up - this may be due to an out of date ifenslave\n",
-                      slave_dev->name);
+               netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
+                          slave_dev->name);
                res = -EPERM;
                goto err_undo_flags;
        }
@@ -1268,16 +1262,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         */
        if (!bond_has_slaves(bond)) {
                if (bond_dev->type != slave_dev->type) {
-                       pr_debug("%s: change device type from %d to %d\n",
-                                bond_dev->name,
-                                bond_dev->type, slave_dev->type);
+                       netdev_dbg(bond_dev, "change device type from %d to %d\n",
+                                  bond_dev->type, slave_dev->type);
 
                        res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
                                                       bond_dev);
                        res = notifier_to_errno(res);
                        if (res) {
-                               pr_err("%s: refused to change device type\n",
-                                      bond_dev->name);
+                               netdev_err(bond_dev, "refused to change device type\n");
                                res = -EBUSY;
                                goto err_undo_flags;
                        }
@@ -1297,26 +1289,24 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                                 bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
-               pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
-                      slave_dev->name, slave_dev->type, bond_dev->type);
+               netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+                          slave_dev->name, slave_dev->type, bond_dev->type);
                res = -EINVAL;
                goto err_undo_flags;
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (!bond_has_slaves(bond)) {
-                       pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
-                               bond_dev->name);
-                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+               netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
+               if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+                   bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+                       if (!bond_has_slaves(bond)) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
-                               pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
-                                       bond_dev->name);
+                               netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
+                       } else {
+                               netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
+                               res = -EOPNOTSUPP;
+                               goto err_undo_flags;
                        }
-               } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
-                       pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
-                              bond_dev->name);
-                       res = -EOPNOTSUPP;
-                       goto err_undo_flags;
                }
        }
 
@@ -1346,7 +1336,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        new_slave->original_mtu = slave_dev->mtu;
        res = dev_set_mtu(slave_dev, bond->dev->mtu);
        if (res) {
-               pr_debug("Error %d calling dev_set_mtu\n", res);
+               netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
                goto err_free;
        }
 
@@ -1367,7 +1357,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                addr.sa_family = slave_dev->type;
                res = dev_set_mac_address(slave_dev, &addr);
                if (res) {
-                       pr_debug("Error %d calling set_mac_address\n", res);
+                       netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
                        goto err_restore_mtu;
                }
        }
@@ -1375,7 +1365,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
-               pr_debug("Opening slave %s failed\n", slave_dev->name);
+               netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
                goto err_restore_mac;
        }
 
@@ -1425,8 +1415,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        res = vlan_vids_add_by_dev(slave_dev, bond_dev);
        if (res) {
-               pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
-                      bond_dev->name, slave_dev->name);
+               netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
+                          slave_dev->name);
                goto err_close;
        }
 
@@ -1455,12 +1445,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                         * supported); thus, we don't need to change
                         * the messages for netif_carrier.
                         */
-                       pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+                                   slave_dev->name);
                } else if (link_reporting == -1) {
                        /* unable get link status using mii/ethtool */
-                       pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+                                   slave_dev->name);
                }
        }
 
@@ -1485,9 +1475,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        if (new_slave->link != BOND_LINK_DOWN)
                new_slave->last_link_up = jiffies;
-       pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
-                new_slave->link == BOND_LINK_DOWN ? "DOWN" :
-                (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+       netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
+                  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+                  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
        if (bond_uses_primary(bond) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
@@ -1528,7 +1518,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                break;
        default:
-               pr_debug("This slave is always active in trunk mode\n");
+               netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");
 
                /* always active in trunk mode */
                bond_set_active_slave(new_slave);
@@ -1537,7 +1527,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 * anyway (it holds no special properties of the bond device),
                 * so we can change it without calling change_active_interface()
                 */
-               if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
+               if (!rcu_access_pointer(bond->curr_active_slave) &&
+                   new_slave->link == BOND_LINK_UP)
                        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
                break;
@@ -1547,8 +1538,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
                if (slave_enable_netpoll(new_slave)) {
-                       pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
-                               bond_dev->name);
+                       netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
                        res = -EBUSY;
                        goto err_detach;
                }
@@ -1558,19 +1548,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
        if (res) {
-               pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+               netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
                goto err_detach;
        }
 
        res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
        if (res) {
-               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+               netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
                goto err_unregister;
        }
 
        res = bond_sysfs_slave_add(new_slave);
        if (res) {
-               pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
+               netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
                goto err_upper_unlink;
        }
 
@@ -1586,10 +1576,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                unblock_netpoll_tx();
        }
 
-       pr_info("%s: Enslaving %s as %s interface with %s link\n",
-               bond_dev->name, slave_dev->name,
-               bond_is_active_slave(new_slave) ? "an active" : "a backup",
-               new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+       netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
+                   slave_dev->name,
+                   bond_is_active_slave(new_slave) ? "an active" : "a backup",
+                   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
        /* enslave is successful */
        return 0;
@@ -1608,7 +1598,7 @@ err_detach:
        vlan_vids_del_by_dev(slave_dev, bond_dev);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
-       if (bond->curr_active_slave == new_slave) {
+       if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_change_active_slave(bond, NULL);
@@ -1674,8 +1664,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
            !netdev_has_upper_dev(slave_dev, bond_dev)) {
-               pr_err("%s: Error: cannot release %s\n",
-                      bond_dev->name, slave_dev->name);
+               netdev_err(bond_dev, "cannot release %s\n",
+                          slave_dev->name);
                return -EINVAL;
        }
 
@@ -1684,8 +1674,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        slave = bond_get_slave_by_dev(bond, slave_dev);
        if (!slave) {
                /* not a slave of this bond */
-               pr_info("%s: %s not enslaved\n",
-                       bond_dev->name, slave_dev->name);
+               netdev_info(bond_dev, "%s not enslaved\n",
+                           slave_dev->name);
                unblock_netpoll_tx();
                return -EINVAL;
        }
@@ -1705,23 +1695,21 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        write_unlock_bh(&bond->lock);
 
-       pr_info("%s: Releasing %s interface %s\n",
-               bond_dev->name,
-               bond_is_active_slave(slave) ? "active" : "backup",
-               slave_dev->name);
+       netdev_info(bond_dev, "Releasing %s interface %s\n",
+                   bond_is_active_slave(slave) ? "active" : "backup",
+                   slave_dev->name);
 
-       oldcurrent = bond->curr_active_slave;
+       oldcurrent = rcu_access_pointer(bond->curr_active_slave);
 
-       bond->current_arp_slave = NULL;
+       RCU_INIT_POINTER(bond->current_arp_slave, NULL);
 
        if (!all && (!bond->params.fail_over_mac ||
                     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
-                       pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
-                               bond_dev->name, slave_dev->name,
-                               slave->perm_hwaddr,
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+                                   slave_dev->name, slave->perm_hwaddr,
+                                   bond_dev->name, slave_dev->name);
        }
 
        if (bond->primary_slave == slave)
@@ -1760,13 +1748,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
-
-               if (vlan_uses_dev(bond_dev)) {
-                       pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
-                               bond_dev->name, bond_dev->name);
-                       pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
-                               bond_dev->name);
-               }
        }
 
        unblock_netpoll_tx();
@@ -1781,8 +1762,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        bond_compute_features(bond);
        if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
            (old_features & NETIF_F_VLAN_CHALLENGED))
-               pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
-                       bond_dev->name, slave_dev->name, bond_dev->name);
+               netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
+                           slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
        vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1849,8 +1830,8 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        ret = bond_release(bond_dev, slave_dev);
        if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               pr_info("%s: Destroying bond %s\n",
-                       bond_dev->name, bond_dev->name);
+               netdev_info(bond_dev, "Destroying bond %s\n",
+                           bond_dev->name);
                unregister_netdevice(bond_dev);
        }
        return ret;
@@ -1891,7 +1872,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 
 /*-------------------------------- Monitoring -------------------------------*/
 
-
+/* called with rcu_read_lock() */
 static int bond_miimon_inspect(struct bonding *bond)
 {
        int link_state, commit = 0;
@@ -1899,7 +1880,7 @@ static int bond_miimon_inspect(struct bonding *bond)
        struct slave *slave;
        bool ignore_updelay;
 
-       ignore_updelay = !bond->curr_active_slave ? true : false;
+       ignore_updelay = !rcu_dereference(bond->curr_active_slave);
 
        bond_for_each_slave_rcu(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
@@ -1914,14 +1895,13 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->link = BOND_LINK_FAIL;
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
-                               pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
-                                       bond->dev->name,
-                                       (BOND_MODE(bond) ==
-                                        BOND_MODE_ACTIVEBACKUP) ?
-                                       (bond_is_active_slave(slave) ?
-                                        "active " : "backup ") : "",
-                                       slave->dev->name,
-                                       bond->params.downdelay * bond->params.miimon);
+                               netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
+                                           (BOND_MODE(bond) ==
+                                            BOND_MODE_ACTIVEBACKUP) ?
+                                            (bond_is_active_slave(slave) ?
+                                             "active " : "backup ") : "",
+                                           slave->dev->name,
+                                           bond->params.downdelay * bond->params.miimon);
                        }
                        /*FALLTHRU*/
                case BOND_LINK_FAIL:
@@ -1931,11 +1911,10 @@ static int bond_miimon_inspect(struct bonding *bond)
                                 */
                                slave->link = BOND_LINK_UP;
                                slave->last_link_up = jiffies;
-                               pr_info("%s: link status up again after %d ms for interface %s\n",
-                                       bond->dev->name,
-                                       (bond->params.downdelay - slave->delay) *
-                                       bond->params.miimon,
-                                       slave->dev->name);
+                               netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
+                                           (bond->params.downdelay - slave->delay) *
+                                           bond->params.miimon,
+                                           slave->dev->name);
                                continue;
                        }
 
@@ -1956,21 +1935,20 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
-                               pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
-                                       bond->dev->name, slave->dev->name,
-                                       ignore_updelay ? 0 :
-                                       bond->params.updelay *
-                                       bond->params.miimon);
+                               netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
+                                           slave->dev->name,
+                                           ignore_updelay ? 0 :
+                                           bond->params.updelay *
+                                           bond->params.miimon);
                        }
                        /*FALLTHRU*/
                case BOND_LINK_BACK:
                        if (!link_state) {
                                slave->link = BOND_LINK_DOWN;
-                               pr_info("%s: link status down again after %d ms for interface %s\n",
-                                       bond->dev->name,
-                                       (bond->params.updelay - slave->delay) *
-                                       bond->params.miimon,
-                                       slave->dev->name);
+                               netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
+                                           (bond->params.updelay - slave->delay) *
+                                           bond->params.miimon,
+                                           slave->dev->name);
 
                                continue;
                        }
@@ -2018,10 +1996,10 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_backup_slave(slave);
                        }
 
-                       pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
-                               bond->dev->name, slave->dev->name,
-                               slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
-                               slave->duplex ? "full" : "half");
+                       netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
+                                   slave->dev->name,
+                                   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+                                   slave->duplex ? "full" : "half");
 
                        /* notify ad that the link status has changed */
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2048,8 +2026,8 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
 
-                       pr_info("%s: link status definitely down for interface %s, disabling it\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
+                                   slave->dev->name);
 
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave,
@@ -2059,15 +2037,14 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_alb_handle_link_change(bond, slave,
                                                            BOND_LINK_DOWN);
 
-                       if (slave == bond->curr_active_slave)
+                       if (slave == rcu_access_pointer(bond->curr_active_slave))
                                goto do_failover;
 
                        continue;
 
                default:
-                       pr_err("%s: invalid new link %d on slave %s\n",
-                              bond->dev->name, slave->new_link,
-                              slave->dev->name);
+                       netdev_err(bond->dev, "invalid new link %d on slave %s\n",
+                                  slave->new_link, slave->dev->name);
                        slave->new_link = BOND_LINK_NOCHANGE;
 
                        continue;
@@ -2168,10 +2145,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
-       int i;
+       struct bond_vlan_tag *outer_tag = tags;
 
-       pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
-                arp_op, slave_dev->name, &dest_ip, &src_ip);
+       netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
+                  arp_op, slave_dev->name, &dest_ip, &src_ip);
 
        skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
                         NULL, slave_dev->dev_addr, NULL);
@@ -2181,30 +2158,42 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                return;
        }
 
+       if (!tags || tags->vlan_proto == VLAN_N_VID)
+               goto xmit;
+
+       tags++;
+
        /* Go through all the tags backwards and add them to the packet */
-       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
-               if (!tags[i].vlan_id)
+       while (tags->vlan_proto != VLAN_N_VID) {
+               if (!tags->vlan_id) {
+                       tags++;
                        continue;
+               }
 
-               pr_debug("inner tag: proto %X vid %X\n",
-                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
-               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
-                                    tags[i].vlan_id);
+               netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
+                          ntohs(tags->vlan_proto), tags->vlan_id);
+               skb = __vlan_put_tag(skb, tags->vlan_proto,
+                                    tags->vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert inner VLAN tag\n");
                        return;
                }
+
+               tags++;
        }
        /* Set the outer tag */
-       if (tags[0].vlan_id) {
-               pr_debug("outer tag: proto %X vid %X\n",
-                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
-               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
+       if (outer_tag->vlan_id) {
+               netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
+                          ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
+               skb = vlan_put_tag(skb, outer_tag->vlan_proto,
+                                  outer_tag->vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
                }
        }
+
+xmit:
        arp_xmit(skb);
 }
 
@@ -2214,46 +2203,50 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
  * When the path is validated, collect any vlan information in the
  * path.
  */
-bool bond_verify_device_path(struct net_device *start_dev,
-                            struct net_device *end_dev,
-                            struct bond_vlan_tag *tags)
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+                                             struct net_device *end_dev,
+                                             int level)
 {
+       struct bond_vlan_tag *tags;
        struct net_device *upper;
        struct list_head  *iter;
-       int  idx;
 
-       if (start_dev == end_dev)
-               return true;
+       if (start_dev == end_dev) {
+               tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC);
+               if (!tags)
+                       return ERR_PTR(-ENOMEM);
+               tags[level].vlan_proto = VLAN_N_VID;
+               return tags;
+       }
 
        netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
-               if (bond_verify_device_path(upper, end_dev, tags)) {
-                       if (is_vlan_dev(upper)) {
-                               idx = vlan_get_encap_level(upper);
-                               if (idx >= BOND_MAX_VLAN_ENCAP)
-                                       return false;
-
-                               tags[idx].vlan_proto =
-                                                   vlan_dev_vlan_proto(upper);
-                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
-                       }
-                       return true;
+               tags = bond_verify_device_path(upper, end_dev, level + 1);
+               if (IS_ERR_OR_NULL(tags)) {
+                       if (IS_ERR(tags))
+                               return tags;
+                       continue;
                }
+               if (is_vlan_dev(upper)) {
+                       tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
+                       tags[level].vlan_id = vlan_dev_vlan_id(upper);
+               }
+
+               return tags;
        }
 
-       return false;
+       return NULL;
 }
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
        struct rtable *rt;
-       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
+       struct bond_vlan_tag *tags;
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
-       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
-               pr_debug("basa: target %pI4\n", &targets[i]);
-               memset(tags, 0, sizeof(tags));
+               netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
+               tags = NULL;
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2276,16 +2269,15 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
+               tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
                rcu_read_unlock();
 
-               if (ret)
+               if (!IS_ERR_OR_NULL(tags))
                        goto found;
 
                /* Not our device - skip */
-               pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
-                        bond->dev->name, &targets[i],
-                        rt->dst.dev ? rt->dst.dev->name : "NULL");
+               netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
+                          &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
 
                ip_rt_put(rt);
                continue;
@@ -2295,6 +2287,8 @@ found:
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
                              addr, tags);
+               /* tags is NULL or a valid kzalloc'd array here; kfree(NULL) is a no-op */
+               kfree(tags);
        }
 }
 
@@ -2303,13 +2297,15 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
        int i;
 
        if (!sip || !bond_has_this_ip(bond, tip)) {
-               pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
+               netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
+                          &sip, &tip);
                return;
        }
 
        i = bond_get_targets_ip(bond->params.arp_targets, sip);
        if (i == -1) {
-               pr_debug("bva: sip %pI4 not found in targets\n", &sip);
+               netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
+                          &sip);
                return;
        }
        slave->last_rx = jiffies;
@@ -2336,8 +2332,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 
        alen = arp_hdr_len(bond->dev);
 
-       pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
-                bond->dev->name, skb->dev->name);
+       netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
+                  skb->dev->name);
 
        if (alen > skb_headlen(skb)) {
                arp = kmalloc(alen, GFP_ATOMIC);
@@ -2361,10 +2357,10 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
        arp_ptr += 4 + bond->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);
 
-       pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
-                bond->dev->name, slave->dev->name, bond_slave_state(slave),
-                bond->params.arp_validate, slave_do_arp_validate(bond, slave),
-                &sip, &tip);
+       netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
+                  slave->dev->name, bond_slave_state(slave),
+                  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+                  &sip, &tip);
 
        curr_active_slave = rcu_dereference(bond->curr_active_slave);
 
@@ -2429,7 +2425,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 
        rcu_read_lock();
 
-       oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
+       oldcurrent = rcu_dereference(bond->curr_active_slave);
        /* see if any of the previous devices are up now (i.e. they have
         * xmt and rcv traffic). the curr_active_slave does not come into
         * the picture unless it is null. also, slave->last_link_up is not
@@ -2454,14 +2450,12 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                 * is closed.
                                 */
                                if (!oldcurrent) {
-                                       pr_info("%s: link status definitely up for interface %s\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
+                                       netdev_info(bond->dev, "link status definitely up for interface %s\n",
+                                                   slave->dev->name);
                                        do_failover = 1;
                                } else {
-                                       pr_info("%s: interface %s is now up\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
+                                       netdev_info(bond->dev, "interface %s is now up\n",
+                                                   slave->dev->name);
                                }
                        }
                } else {
@@ -2480,8 +2474,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                if (slave->link_failure_count < UINT_MAX)
                                        slave->link_failure_count++;
 
-                               pr_info("%s: interface %s is now down\n",
-                                       bond->dev->name, slave->dev->name);
+                               netdev_info(bond->dev, "interface %s is now down\n",
+                                           slave->dev->name);
 
                                if (slave == oldcurrent)
                                        do_failover = 1;
@@ -2577,7 +2571,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
                 * before being taken out
                 */
                if (!bond_is_active_slave(slave) &&
-                   !bond->current_arp_slave &&
+                   !rcu_access_pointer(bond->current_arp_slave) &&
                    !bond_time_in_interval(bond, last_rx, 3)) {
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -2620,21 +2614,24 @@ static void bond_ab_arp_commit(struct bonding *bond)
 
                case BOND_LINK_UP:
                        trans_start = dev_trans_start(slave->dev);
-                       if (bond->curr_active_slave != slave ||
-                           (!bond->curr_active_slave &&
+                       if (rtnl_dereference(bond->curr_active_slave) != slave ||
+                           (!rtnl_dereference(bond->curr_active_slave) &&
                             bond_time_in_interval(bond, trans_start, 1))) {
+                               struct slave *current_arp_slave;
+
+                               current_arp_slave = rtnl_dereference(bond->current_arp_slave);
                                slave->link = BOND_LINK_UP;
-                               if (bond->current_arp_slave) {
+                               if (current_arp_slave) {
                                        bond_set_slave_inactive_flags(
-                                               bond->current_arp_slave,
+                                               current_arp_slave,
                                                BOND_SLAVE_NOTIFY_NOW);
-                                       bond->current_arp_slave = NULL;
+                                       RCU_INIT_POINTER(bond->current_arp_slave, NULL);
                                }
 
-                               pr_info("%s: link status definitely up for interface %s\n",
-                                       bond->dev->name, slave->dev->name);
+                               netdev_info(bond->dev, "link status definitely up for interface %s\n",
+                                           slave->dev->name);
 
-                               if (!bond->curr_active_slave ||
+                               if (!rtnl_dereference(bond->curr_active_slave) ||
                                    (slave == bond->primary_slave))
                                        goto do_failover;
 
@@ -2650,20 +2647,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_NOW);
 
-                       pr_info("%s: link status definitely down for interface %s, disabling it\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
+                                   slave->dev->name);
 
-                       if (slave == bond->curr_active_slave) {
-                               bond->current_arp_slave = NULL;
+                       if (slave == rtnl_dereference(bond->curr_active_slave)) {
+                               RCU_INIT_POINTER(bond->current_arp_slave, NULL);
                                goto do_failover;
                        }
 
                        continue;
 
                default:
-                       pr_err("%s: impossible: new_link %d on slave %s\n",
-                              bond->dev->name, slave->new_link,
-                              slave->dev->name);
+                       netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
+                                  slave->new_link, slave->dev->name);
                        continue;
                }
 
@@ -2694,9 +2690,9 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
 
        if (curr_arp_slave && curr_active_slave)
-               pr_info("PROBE: c_arp %s && cas %s BAD\n",
-                       curr_arp_slave->dev->name,
-                       curr_active_slave->dev->name);
+               netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
+                           curr_arp_slave->dev->name,
+                           curr_active_slave->dev->name);
 
        if (curr_active_slave) {
                bond_arp_send_all(bond, curr_active_slave);
@@ -2737,8 +2733,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_LATER);
 
-                       pr_info("%s: backup interface %s is now down\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "backup interface %s is now down\n",
+                                   slave->dev->name);
                }
                if (slave == curr_arp_slave)
                        found = true;
@@ -2934,9 +2930,8 @@ static int bond_slave_netdev_event(unsigned long event,
                        break;
                }
 
-               pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
-                       bond->dev->name,
-                       bond->primary_slave ? slave_dev->name : "none");
+               netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
+                           bond->primary_slave ? slave_dev->name : "none");
 
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
@@ -2971,19 +2966,18 @@ static int bond_netdev_event(struct notifier_block *this,
 {
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-       pr_debug("event_dev: %s, event: %lx\n",
-                event_dev ? event_dev->name : "None", event);
+       netdev_dbg(event_dev, "event: %lx\n", event);
 
        if (!(event_dev->priv_flags & IFF_BONDING))
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
-               pr_debug("IFF_MASTER\n");
+               netdev_dbg(event_dev, "IFF_MASTER\n");
                return bond_master_netdev_event(event, event_dev);
        }
 
        if (event_dev->flags & IFF_SLAVE) {
-               pr_debug("IFF_SLAVE\n");
+               netdev_dbg(event_dev, "IFF_SLAVE\n");
                return bond_slave_netdev_event(event, event_dev);
        }
 
@@ -2999,11 +2993,11 @@ static struct notifier_block bond_netdev_notifier = {
 /* L2 hash helper */
 static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-
-       if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-               return data->h_dest[5] ^ data->h_source[5];
+       struct ethhdr *ep, hdr_tmp;
 
+       ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
+       if (ep)
+               return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
        return 0;
 }
 
@@ -3110,8 +3104,8 @@ static int bond_open(struct net_device *bond_dev)
        if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
-                       if (bond_uses_primary(bond)
-                               && (slave != bond->curr_active_slave)) {
+                       if (bond_uses_primary(bond) &&
+                           slave != rcu_access_pointer(bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
                        } else {
@@ -3225,7 +3219,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
        struct net *net;
        int res = 0;
 
-       pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
+       netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
 
        switch (cmd) {
        case SIOCGMIIPHY:
@@ -3295,12 +3289,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 
        slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
 
-       pr_debug("slave_dev=%p:\n", slave_dev);
+       netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
 
        if (!slave_dev)
                return -ENODEV;
 
-       pr_debug("slave_dev->name=%s:\n", slave_dev->name);
+       netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
        switch (cmd) {
        case BOND_ENSLAVE_OLD:
        case SIOCBONDENSLAVE:
@@ -3427,8 +3421,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
        struct list_head *iter;
        int res = 0;
 
-       pr_debug("bond=%p, name=%s, new_mtu=%d\n",
-                bond, bond_dev ? bond_dev->name : "None", new_mtu);
+       netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
 
        /* Can't hold bond->lock with bh disabled here since
         * some base drivers panic. On the other hand we can't
@@ -3446,8 +3439,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         */
 
        bond_for_each_slave(bond, slave, iter) {
-               pr_debug("s %p c_m %p\n",
-                        slave, slave->dev->netdev_ops->ndo_change_mtu);
+               netdev_dbg(bond_dev, "s %p c_m %p\n",
+                          slave, slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
 
@@ -3460,7 +3453,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
                         * means changing their mtu from timer context, which
                         * is probably not a good idea.
                         */
-                       pr_debug("err %d %s\n", res, slave->dev->name);
+                       netdev_dbg(bond_dev, "err %d %s\n", res,
+                                  slave->dev->name);
                        goto unwind;
                }
        }
@@ -3479,8 +3473,8 @@ unwind:
 
                tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
                if (tmp_res) {
-                       pr_debug("unwind err %d dev %s\n",
-                                tmp_res, rollback_slave->dev->name);
+                       netdev_dbg(bond_dev, "unwind err %d dev %s\n",
+                                  tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3506,8 +3500,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
                return bond_alb_set_mac_address(bond_dev, addr);
 
 
-       pr_debug("bond=%p, name=%s\n",
-                bond, bond_dev ? bond_dev->name : "None");
+       netdev_dbg(bond_dev, "bond=%p\n", bond);
 
        /* If fail_over_mac is enabled, do nothing and return success.
         * Returning an error causes ifenslave to fail.
@@ -3535,7 +3528,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         */
 
        bond_for_each_slave(bond, slave, iter) {
-               pr_debug("slave %p %s\n", slave, slave->dev->name);
+               netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
                res = dev_set_mac_address(slave->dev, addr);
                if (res) {
                        /* TODO: consider downing the slave
@@ -3544,7 +3537,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
                         * breakage anyway until ARP finish
                         * updating, so...
                         */
-                       pr_debug("err %d %s\n", res, slave->dev->name);
+                       netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
                        goto unwind;
                }
        }
@@ -3566,8 +3559,8 @@ unwind:
 
                tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
                if (tmp_res) {
-                       pr_debug("unwind err %d dev %s\n",
-                                tmp_res, rollback_slave->dev->name);
+                       netdev_dbg(bond_dev, "unwind err %d dev %s\n",
+                                  tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3814,8 +3807,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
                return bond_tlb_xmit(skb, dev);
        default:
                /* Should never happen, mode already checked */
-               pr_err("%s: Error: Unknown bonding mode %d\n",
-                      dev->name, BOND_MODE(bond));
+               netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
                WARN_ON_ONCE(1);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3956,13 +3948,6 @@ void bond_setup(struct net_device *bond_dev)
        bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
        bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
-       /* At first, we block adding VLANs. That's the only way to
-        * prevent problems that occur when adding VLANs over an
-        * empty bond. The block will be removed once non-challenged
-        * slaves are enslaved.
-        */
-       bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-
        /* don't acquire bond device's netif_tx_lock when
         * transmitting */
        bond_dev->features |= NETIF_F_LLTX;
@@ -4002,7 +3987,7 @@ static void bond_uninit(struct net_device *bond_dev)
        /* Release the bonded slaves */
        bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
-       pr_info("%s: Released all slaves\n", bond_dev->name);
+       netdev_info(bond_dev, "Released all slaves\n");
 
        list_del(&bond->bond_list);
 
@@ -4391,7 +4376,7 @@ static int bond_init(struct net_device *bond_dev)
        struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-       pr_debug("Begin bond_init for %s\n", bond_dev->name);
+       netdev_dbg(bond_dev, "Begin bond_init\n");
 
        /*
         * Initialize locks that may be required during
@@ -4440,7 +4425,7 @@ int bond_create(struct net *net, const char *name)
        rtnl_lock();
 
        bond_dev = alloc_netdev_mq(sizeof(struct bonding),
-                                  name ? name : "bond%d",
+                                  name ? name : "bond%d", NET_NAME_UNKNOWN,
                                   bond_setup, tx_queues);
        if (!bond_dev) {
                pr_err("%s: eek! can't alloc netdev!\n", name);
@@ -4481,7 +4466,6 @@ static void __net_exit bond_net_exit(struct net *net)
        LIST_HEAD(list);
 
        bond_destroy_sysfs(bn);
-       bond_destroy_proc_dir(bn);
 
        /* Kill off any bonds created after unregistering bond rtnl ops */
        rtnl_lock();
@@ -4489,6 +4473,8 @@ static void __net_exit bond_net_exit(struct net *net)
                unregister_netdevice_queue(bond->dev, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
+
+       bond_destroy_proc_dir(bn);
 }
 
 static struct pernet_operations bond_net_ops = {
index 5ab3c1847e6760e2f3ef7d2ec35085c2d4bf655b..d163e112f04ce00956fedd17221c3c283ff8a3c6 100644 (file)
@@ -9,8 +9,6 @@
  * (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
@@ -181,8 +179,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
 
                if (arp_interval && miimon) {
-                       pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
                        return -EINVAL;
                }
 
@@ -207,8 +204,7 @@ static int bond_changelink(struct net_device *bond_dev,
                        i++;
                }
                if (i == 0 && bond->params.arp_interval)
-                       pr_warn("%s: Removing last arp target with arp_interval on\n",
-                               bond->dev->name);
+                       netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
                if (err)
                        return err;
        }
@@ -216,8 +212,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
 
                if (arp_validate && miimon) {
-                       pr_err("%s: ARP validating cannot be used with MII monitoring\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
                        return -EINVAL;
                }
 
@@ -398,20 +393,31 @@ static size_t bond_get_size(const struct net_device *bond_dev)
                0;
 }
 
+static int bond_option_active_slave_get_ifindex(struct bonding *bond)
+{
+       const struct net_device *slave;
+       int ifindex;
+
+       rcu_read_lock();
+       slave = bond_option_active_slave_get_rcu(bond);
+       ifindex = slave ? slave->ifindex : 0;
+       rcu_read_unlock();
+       return ifindex;
+}
+
 static int bond_fill_info(struct sk_buff *skb,
                          const struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct net_device *slave_dev = bond_option_active_slave_get(bond);
-       struct nlattr *targets;
        unsigned int packets_per_slave;
-       int i, targets_added;
+       int ifindex, i, targets_added;
+       struct nlattr *targets;
 
        if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
                goto nla_put_failure;
 
-       if (slave_dev &&
-           nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+       ifindex = bond_option_active_slave_get_ifindex(bond);
+       if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
                goto nla_put_failure;
 
        if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
index 540e0167bf24992037165d82c5af307d85f15f02..dc73463c2c237d99bf26c9b3ff21512c36f69504 100644 (file)
@@ -9,8 +9,6 @@
  * (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/errno.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
@@ -544,9 +542,8 @@ static void bond_opt_dep_print(struct bonding *bond,
        params = &bond->params;
        modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
        if (test_bit(params->mode, &opt->unsuppmodes))
-               pr_err("%s: option %s: mode dependency failed, not supported in mode %s(%llu)\n",
-                      bond->dev->name, opt->name,
-                      modeval->string, modeval->value);
+               netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
+                          opt->name, modeval->string, modeval->value);
 }
 
 static void bond_opt_error_interpret(struct bonding *bond,
@@ -564,31 +561,30 @@ static void bond_opt_error_interpret(struct bonding *bond,
                                p = strchr(val->string, '\n');
                                if (p)
                                        *p = '\0';
-                               pr_err("%s: option %s: invalid value (%s)\n",
-                                      bond->dev->name, opt->name, val->string);
+                               netdev_err(bond->dev, "option %s: invalid value (%s)\n",
+                                          opt->name, val->string);
                        } else {
-                               pr_err("%s: option %s: invalid value (%llu)\n",
-                                      bond->dev->name, opt->name, val->value);
+                               netdev_err(bond->dev, "option %s: invalid value (%llu)\n",
+                                          opt->name, val->value);
                        }
                }
                minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
                maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
                if (!maxval)
                        break;
-               pr_err("%s: option %s: allowed values %llu - %llu\n",
-                      bond->dev->name, opt->name, minval ? minval->value : 0,
-                      maxval->value);
+               netdev_err(bond->dev, "option %s: allowed values %llu - %llu\n",
+                          opt->name, minval ? minval->value : 0, maxval->value);
                break;
        case -EACCES:
                bond_opt_dep_print(bond, opt);
                break;
        case -ENOTEMPTY:
-               pr_err("%s: option %s: unable to set because the bond device has slaves\n",
-                      bond->dev->name, opt->name);
+               netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
+                          opt->name);
                break;
        case -EBUSY:
-               pr_err("%s: option %s: unable to set because the bond device is up\n",
-                      bond->dev->name, opt->name);
+               netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
+                          opt->name);
                break;
        default:
                break;
@@ -671,17 +667,18 @@ const struct bond_option *bond_opt_get(unsigned int option)
        return &bond_opts[option];
 }
 
-int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
+static int bond_option_mode_set(struct bonding *bond,
+                               const struct bond_opt_value *newval)
 {
        if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-               pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
-                       bond->dev->name, newval->string);
+               netdev_info(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+                           newval->string);
                /* disable arp monitoring */
                bond->params.arp_interval = 0;
                /* set miimon to default value */
                bond->params.miimon = BOND_DEFAULT_MIIMON;
-               pr_info("%s: Setting MII monitoring interval to %d\n",
-                       bond->dev->name, bond->params.miimon);
+               netdev_info(bond->dev, "Setting MII monitoring interval to %d\n",
+                           bond->params.miimon);
        }
 
        /* don't cache arp_validate between modes */
@@ -704,11 +701,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
        return __bond_option_active_slave_get(bond, slave);
 }
 
-struct net_device *bond_option_active_slave_get(struct bonding *bond)
-{
-       return __bond_option_active_slave_get(bond, bond->curr_active_slave);
-}
-
 static int bond_option_active_slave_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
@@ -727,14 +719,14 @@ static int bond_option_active_slave_set(struct bonding *bond,
 
        if (slave_dev) {
                if (!netif_is_bond_slave(slave_dev)) {
-                       pr_err("Device %s is not bonding slave\n",
-                              slave_dev->name);
+                       netdev_err(bond->dev, "Device %s is not bonding slave\n",
+                                  slave_dev->name);
                        return -EINVAL;
                }
 
                if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
-                       pr_err("%s: Device %s is not our slave\n",
-                              bond->dev->name, slave_dev->name);
+                       netdev_err(bond->dev, "Device %s is not our slave\n",
+                                  slave_dev->name);
                        return -EINVAL;
                }
        }
@@ -744,29 +736,29 @@ static int bond_option_active_slave_set(struct bonding *bond,
 
        /* check to see if we are clearing active */
        if (!slave_dev) {
-               pr_info("%s: Clearing current active slave\n", bond->dev->name);
+               netdev_info(bond->dev, "Clearing current active slave\n");
                RCU_INIT_POINTER(bond->curr_active_slave, NULL);
                bond_select_active_slave(bond);
        } else {
-               struct slave *old_active = bond->curr_active_slave;
+               struct slave *old_active = bond_deref_active_protected(bond);
                struct slave *new_active = bond_slave_get_rtnl(slave_dev);
 
                BUG_ON(!new_active);
 
                if (new_active == old_active) {
                        /* do nothing */
-                       pr_info("%s: %s is already the current active slave\n",
-                               bond->dev->name, new_active->dev->name);
+                       netdev_info(bond->dev, "%s is already the current active slave\n",
+                                   new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
                            bond_slave_is_up(new_active)) {
-                               pr_info("%s: Setting %s as active slave\n",
-                                       bond->dev->name, new_active->dev->name);
+                               netdev_info(bond->dev, "Setting %s as active slave\n",
+                                           new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
                        } else {
-                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
-                                      bond->dev->name, new_active->dev->name,
-                                      new_active->dev->name);
+                               netdev_err(bond->dev, "Could not set %s as active slave; either %s is down or the link is down\n",
+                                          new_active->dev->name,
+                                          new_active->dev->name);
                                ret = -EINVAL;
                        }
                }
@@ -785,20 +777,17 @@ static int bond_option_active_slave_set(struct bonding *bond,
 static int bond_option_miimon_set(struct bonding *bond,
                                  const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting MII monitoring interval to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting MII monitoring interval to %llu\n",
+                   newval->value);
        bond->params.miimon = newval->value;
        if (bond->params.updelay)
-               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
-                       bond->dev->name,
+               netdev_info(bond->dev, "Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
                        bond->params.updelay * bond->params.miimon);
        if (bond->params.downdelay)
-               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
-                       bond->dev->name,
-                       bond->params.downdelay * bond->params.miimon);
+               netdev_info(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
+                           bond->params.downdelay * bond->params.miimon);
        if (newval->value && bond->params.arp_interval) {
-               pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
-                       bond->dev->name);
+               netdev_info(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
                bond->params.arp_interval = 0;
                if (bond->params.arp_validate)
                        bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
@@ -830,20 +819,18 @@ static int bond_option_updelay_set(struct bonding *bond,
        int value = newval->value;
 
        if (!bond->params.miimon) {
-               pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
-                      bond->dev->name);
+               netdev_err(bond->dev, "Unable to set up delay as MII monitoring is disabled\n");
                return -EPERM;
        }
        if ((value % bond->params.miimon) != 0) {
-               pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
-                       bond->dev->name, value,
-                       bond->params.miimon,
-                       (value / bond->params.miimon) *
-                       bond->params.miimon);
+               netdev_warn(bond->dev, "up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+                           value, bond->params.miimon,
+                           (value / bond->params.miimon) *
+                           bond->params.miimon);
        }
        bond->params.updelay = value / bond->params.miimon;
-       pr_info("%s: Setting up delay to %d\n",
-               bond->dev->name, bond->params.updelay * bond->params.miimon);
+       netdev_info(bond->dev, "Setting up delay to %d\n",
+                   bond->params.updelay * bond->params.miimon);
 
        return 0;
 }
@@ -854,20 +841,18 @@ static int bond_option_downdelay_set(struct bonding *bond,
        int value = newval->value;
 
        if (!bond->params.miimon) {
-               pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
-                      bond->dev->name);
+               netdev_err(bond->dev, "Unable to set down delay as MII monitoring is disabled\n");
                return -EPERM;
        }
        if ((value % bond->params.miimon) != 0) {
-               pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
-                       bond->dev->name, value,
-                       bond->params.miimon,
-                       (value / bond->params.miimon) *
-                       bond->params.miimon);
+               netdev_warn(bond->dev, "down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+                           value, bond->params.miimon,
+                           (value / bond->params.miimon) *
+                           bond->params.miimon);
        }
        bond->params.downdelay = value / bond->params.miimon;
-       pr_info("%s: Setting down delay to %d\n",
-               bond->dev->name, bond->params.downdelay * bond->params.miimon);
+       netdev_info(bond->dev, "Setting down delay to %d\n",
+                   bond->params.downdelay * bond->params.miimon);
 
        return 0;
 }
@@ -875,8 +860,8 @@ static int bond_option_downdelay_set(struct bonding *bond,
 static int bond_option_use_carrier_set(struct bonding *bond,
                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting use_carrier to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting use_carrier to %llu\n",
+                   newval->value);
        bond->params.use_carrier = newval->value;
 
        return 0;
@@ -889,18 +874,16 @@ static int bond_option_use_carrier_set(struct bonding *bond,
 static int bond_option_arp_interval_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ARP monitoring interval to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting ARP monitoring interval to %llu\n",
+                   newval->value);
        bond->params.arp_interval = newval->value;
        if (newval->value) {
                if (bond->params.miimon) {
-                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
-                               bond->dev->name, bond->dev->name);
+                       netdev_info(bond->dev, "ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring\n");
                        bond->params.miimon = 0;
                }
                if (!bond->params.arp_targets[0])
-                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "ARP monitoring has been set up, but no ARP targets have been specified\n");
        }
        if (bond->dev->flags & IFF_UP) {
                /* If the interface is up, we may need to fire off
@@ -944,24 +927,24 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
        int ind;
 
        if (!bond_is_ip_target_ok(target)) {
-               pr_err("%s: invalid ARP target %pI4 specified for addition\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "invalid ARP target %pI4 specified for addition\n",
+                          &target);
                return -EINVAL;
        }
 
        if (bond_get_targets_ip(targets, target) != -1) { /* dup */
-               pr_err("%s: ARP target %pI4 is already present\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "ARP target %pI4 is already present\n",
+                          &target);
                return -EINVAL;
        }
 
        ind = bond_get_targets_ip(targets, 0); /* first free slot */
        if (ind == -1) {
-               pr_err("%s: ARP target table is full!\n", bond->dev->name);
+               netdev_err(bond->dev, "ARP target table is full!\n");
                return -EINVAL;
        }
 
-       pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
+       netdev_info(bond->dev, "Adding ARP target %pI4\n", &target);
 
        _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
 
@@ -989,23 +972,22 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
        int ind, i;
 
        if (!bond_is_ip_target_ok(target)) {
-               pr_err("%s: invalid ARP target %pI4 specified for removal\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "invalid ARP target %pI4 specified for removal\n",
+                          &target);
                return -EINVAL;
        }
 
        ind = bond_get_targets_ip(targets, target);
        if (ind == -1) {
-               pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "unable to remove nonexistent ARP target %pI4\n",
+                          &target);
                return -EINVAL;
        }
 
        if (ind == 0 && !targets[1] && bond->params.arp_interval)
-               pr_warn("%s: Removing last arp target with arp_interval on\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
 
-       pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
+       netdev_info(bond->dev, "Removing ARP target %pI4\n", &target);
 
        /* not to race with bond_arp_rcv */
        write_lock_bh(&bond->lock);
@@ -1044,8 +1026,8 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
 
        if (newval->string) {
                if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
-                       pr_err("%s: invalid ARP target %pI4 specified\n",
-                              bond->dev->name, &target);
+                       netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+                                  &target);
                        return ret;
                }
                if (newval->string[0] == '+')
@@ -1053,8 +1035,7 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
                else if (newval->string[0] == '-')
                        ret = bond_option_arp_ip_target_rem(bond, target);
                else
-                       pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "no command found in arp_ip_targets file - use +<addr> or -<addr>\n");
        } else {
                target = newval->value;
                ret = bond_option_arp_ip_target_add(bond, target);
@@ -1066,8 +1047,8 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
 static int bond_option_arp_validate_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting arp_validate to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
+                   newval->string, newval->value);
 
        if (bond->dev->flags & IFF_UP) {
                if (!newval->value)
@@ -1083,8 +1064,8 @@ static int bond_option_arp_validate_set(struct bonding *bond,
 static int bond_option_arp_all_targets_set(struct bonding *bond,
                                           const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting arp_all_targets to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.arp_all_targets = newval->value;
 
        return 0;
@@ -1106,7 +1087,7 @@ static int bond_option_primary_set(struct bonding *bond,
                *p = '\0';
        /* check to see if we are clearing primary */
        if (!strlen(primary)) {
-               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
+               netdev_info(bond->dev, "Setting primary slave to None\n");
                bond->primary_slave = NULL;
                memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
@@ -1115,8 +1096,8 @@ static int bond_option_primary_set(struct bonding *bond,
 
        bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
-                       pr_info("%s: Setting %s as primary slave\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "Setting %s as primary slave\n",
+                                   slave->dev->name);
                        bond->primary_slave = slave;
                        strcpy(bond->params.primary, slave->dev->name);
                        bond_select_active_slave(bond);
@@ -1125,15 +1106,15 @@ static int bond_option_primary_set(struct bonding *bond,
        }
 
        if (bond->primary_slave) {
-               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
+               netdev_info(bond->dev, "Setting primary slave to None\n");
                bond->primary_slave = NULL;
                bond_select_active_slave(bond);
        }
        strncpy(bond->params.primary, primary, IFNAMSIZ);
        bond->params.primary[IFNAMSIZ - 1] = 0;
 
-       pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
-               bond->dev->name, primary, bond->dev->name);
+       netdev_info(bond->dev, "Recording %s as primary, but it has not been enslaved to %s yet\n",
+                   primary, bond->dev->name);
 
 out:
        write_unlock_bh(&bond->curr_slave_lock);
@@ -1146,8 +1127,8 @@ out:
 static int bond_option_primary_reselect_set(struct bonding *bond,
                                            const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting primary_reselect to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting primary_reselect to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.primary_reselect = newval->value;
 
        block_netpoll_tx();
@@ -1162,8 +1143,8 @@ static int bond_option_primary_reselect_set(struct bonding *bond,
 static int bond_option_fail_over_mac_set(struct bonding *bond,
                                         const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting fail_over_mac to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.fail_over_mac = newval->value;
 
        return 0;
@@ -1172,8 +1153,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
 static int bond_option_xmit_hash_policy_set(struct bonding *bond,
                                            const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.xmit_policy = newval->value;
 
        return 0;
@@ -1182,8 +1163,8 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
 static int bond_option_resend_igmp_set(struct bonding *bond,
                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting resend_igmp to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting resend_igmp to %llu\n",
+                   newval->value);
        bond->params.resend_igmp = newval->value;
 
        return 0;
@@ -1221,8 +1202,8 @@ static int bond_option_all_slaves_active_set(struct bonding *bond,
 static int bond_option_min_links_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting min links value to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting min links value to %llu\n",
+                   newval->value);
        bond->params.min_links = newval->value;
 
        return 0;
@@ -1257,8 +1238,8 @@ static int bond_option_pps_set(struct bonding *bond,
 static int bond_option_lacp_rate_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting LACP rate to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting LACP rate to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.lacp_fast = newval->value;
        bond_3ad_update_lacp_rate(bond);
 
@@ -1268,8 +1249,8 @@ static int bond_option_lacp_rate_set(struct bonding *bond,
 static int bond_option_ad_select_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ad_select to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting ad_select to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.ad_select = newval->value;
 
        return 0;
@@ -1330,7 +1311,7 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
+       netdev_info(bond->dev, "invalid input for queue_id set\n");
        ret = -EPERM;
        goto out;
 
@@ -1352,20 +1333,20 @@ static int bond_option_slaves_set(struct bonding *bond,
 
        dev = __dev_get_by_name(dev_net(bond->dev), ifname);
        if (!dev) {
-               pr_info("%s: interface %s does not exist!\n",
-                       bond->dev->name, ifname);
+               netdev_info(bond->dev, "interface %s does not exist!\n",
+                           ifname);
                ret = -ENODEV;
                goto out;
        }
 
        switch (command[0]) {
        case '+':
-               pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
+               netdev_info(bond->dev, "Adding slave %s\n", dev->name);
                ret = bond_enslave(bond->dev, dev);
                break;
 
        case '-':
-               pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
+               netdev_info(bond->dev, "Removing slave %s\n", dev->name);
                ret = bond_release(bond->dev, dev);
                break;
 
@@ -1377,8 +1358,7 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
-              bond->dev->name);
+       netdev_err(bond->dev, "no command found in slaves file - use +ifname or -ifname\n");
        ret = -EPERM;
        goto out;
 }
@@ -1386,8 +1366,8 @@ err_no_cmd:
 static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
                                          const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting dynamic-lb to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.tlb_dynamic_lb = newval->value;
 
        return 0;
index b215b479bb3a6917ffd6d37f30f989996d176f5b..de62c0385dfb06309a5fa4b4aff3f524a099ebe3 100644 (file)
@@ -252,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
                                                    S_IRUGO, bn->proc_dir,
                                                    &bond_info_fops, bond);
                if (bond->proc_entry == NULL)
-                       pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
-                               DRV_NAME, bond_dev->name);
+                       netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
+                                   DRV_NAME, bond_dev->name);
                else
                        memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
        }
index daed52f68ce1614ec94772ad1628f09417e04bfe..98db8edd9c755c55d04a26b0b40143ba80d15a55 100644 (file)
@@ -492,8 +492,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
                                       char *buf)
 {
        struct bonding *bond = to_bond(d);
+       bool active = !!rcu_access_pointer(bond->curr_active_slave);
 
-       return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
+       return sprintf(buf, "%s\n", active ? "up" : "down");
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
 
index 0b4d9cde0b05e33a34e5deaec3b28126f1cba8e1..aace510d08d1dae8b300528ab703452bbe5c99e5 100644 (file)
@@ -36,7 +36,6 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
-#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
@@ -194,8 +193,8 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   slave *curr_active_slave;
-       struct   slave *current_arp_slave;
+       struct   slave __rcu *curr_active_slave;
+       struct   slave __rcu *current_arp_slave;
        struct   slave *primary_slave;
        bool     force_primary;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
@@ -232,6 +231,10 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+#define bond_deref_active_protected(bond)                                 \
+       rcu_dereference_protected(bond->curr_active_slave,                 \
+                                 lockdep_is_held(&bond->curr_slave_lock))
+
 struct bond_vlan_tag {
        __be16          vlan_proto;
        unsigned short  vlan_id;
@@ -265,6 +268,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
               BOND_MODE(bond) == BOND_MODE_ALB;
 }
 
+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+{
+       return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
+              (bond->params.tlb_dynamic_lb == 0);
+}
+
 static inline bool bond_mode_uses_arp(int mode)
 {
        return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
@@ -514,11 +523,10 @@ unsigned int bond_get_num_tx_queues(void);
 int bond_netlink_init(void);
 void bond_netlink_fini(void);
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
-struct net_device *bond_option_active_slave_get(struct bonding *bond);
 const char *bond_slave_link_status(s8 link);
-bool bond_verify_device_path(struct net_device *start_dev,
-                            struct net_device *end_dev,
-                            struct bond_vlan_tag *tags);
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+                                             struct net_device *end_dev,
+                                             int level);
 
 #ifdef CONFIG_PROC_FS
 void bond_create_proc_entry(struct bonding *bond);
index fc73865bb83a705f9fa1d1f3d70e1c6191952b9b..27bbc56de15fa37497676f8b389cdc17eced09a0 100644 (file)
@@ -349,7 +349,8 @@ static int ldisc_open(struct tty_struct *tty)
        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)
                return -EINVAL;
-       dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+       dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
+                          caifdev_setup);
        if (!dev)
                return -ENOMEM;
 
index ff54c0eb2052137d8f1561448f3c654f34d354df..72ea9ff9bb9c02ae16133de4b12f83e70ec97c0d 100644 (file)
@@ -730,8 +730,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
        int res;
        dev = (struct cfspi_dev *)pdev->dev.platform_data;
 
-       ndev = alloc_netdev(sizeof(struct cfspi),
-                       "cfspi%d", cfspi_setup);
+       ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
+                           NET_NAME_UNKNOWN, cfspi_setup);
        if (!dev)
                return -ENODEV;
 
index 985608634f8cccf9eefd32328c96fbbcb2ae0079..a5fefb9059c592aa5134302ba79f411a2874d65a 100644 (file)
@@ -661,7 +661,7 @@ static int cfv_probe(struct virtio_device *vdev)
        int err = -EINVAL;
 
        netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
-                             cfv_netdev_setup);
+                             NET_NAME_UNKNOWN, cfv_netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
index 824108cd9fd594a91c25b0b4a1d43d3341ad9a31..e29b6d05110346855e8f2cb41abd9ae4c4bf59e6 100644 (file)
@@ -208,40 +208,31 @@ static int c_can_plat_probe(struct platform_device *pdev)
        }
 
        /* get the appropriate clk */
-       clk = clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
-               dev_err(&pdev->dev, "no clock defined\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(clk);
                goto exit;
        }
 
        /* get the platform data */
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!mem || irq <= 0) {
+       if (irq <= 0) {
                ret = -ENODEV;
-               goto exit_free_clk;
-       }
-
-       if (!request_mem_region(mem->start, resource_size(mem),
-                               KBUILD_MODNAME)) {
-               dev_err(&pdev->dev, "resource unavailable\n");
-               ret = -ENODEV;
-               goto exit_free_clk;
+               goto exit;
        }
 
-       addr = ioremap(mem->start, resource_size(mem));
-       if (!addr) {
-               dev_err(&pdev->dev, "failed to map can port\n");
-               ret = -ENOMEM;
-               goto exit_release_mem;
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       addr = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(addr)) {
+               ret =  PTR_ERR(addr);
+               goto exit;
        }
 
        /* allocate the c_can device */
        dev = alloc_c_can_dev();
        if (!dev) {
                ret = -ENOMEM;
-               goto exit_iounmap;
+               goto exit;
        }
 
        priv = netdev_priv(dev);
@@ -321,12 +312,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
 exit_free_device:
        free_c_can_dev(dev);
-exit_iounmap:
-       iounmap(addr);
-exit_release_mem:
-       release_mem_region(mem->start, resource_size(mem));
-exit_free_clk:
-       clk_put(clk);
 exit:
        dev_err(&pdev->dev, "probe failed\n");
 
@@ -336,18 +321,10 @@ exit:
 static int c_can_plat_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
-       struct c_can_priv *priv = netdev_priv(dev);
-       struct resource *mem;
 
        unregister_c_can_dev(dev);
 
        free_c_can_dev(dev);
-       iounmap(priv->base);
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(mem->start, resource_size(mem));
-
-       clk_put(priv->priv);
 
        return 0;
 }
index e318e87e2bfc00ba9e32aa08858de5f5c1629dcf..9f91fcba43f8718f4d546e39faaef1aa3bd9fabf 100644 (file)
@@ -565,7 +565,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
        else
                size = sizeof_priv;
 
-       dev = alloc_netdev(size, "can%d", can_setup);
+       dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup);
        if (!dev)
                return NULL;
 
index f31499a32d7dcf4fde4eb1a0864af4f15bfb6118..d1692154ed1b094ab100bc40ddcc2783d1dfdac7 100644 (file)
@@ -141,6 +141,7 @@ static void set_normal_mode(struct net_device *dev)
 {
        struct sja1000_priv *priv = netdev_priv(dev);
        unsigned char status = priv->read_reg(priv, SJA1000_MOD);
+       u8 mod_reg_val = 0x00;
        int i;
 
        for (i = 0; i < 100; i++) {
@@ -158,9 +159,10 @@ static void set_normal_mode(struct net_device *dev)
 
                /* set chip to normal mode */
                if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
-                       priv->write_reg(priv, SJA1000_MOD, MOD_LOM);
-               else
-                       priv->write_reg(priv, SJA1000_MOD, 0x00);
+                       mod_reg_val |= MOD_LOM;
+               if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+                       mod_reg_val |= MOD_STM;
+               priv->write_reg(priv, SJA1000_MOD, mod_reg_val);
 
                udelay(10);
 
@@ -278,6 +280,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
        uint8_t dlc;
        canid_t id;
        uint8_t dreg;
+       u8 cmd_reg_val = 0x00;
        int i;
 
        if (can_dropped_invalid_skb(dev, skb))
@@ -312,9 +315,14 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
        can_put_echo_skb(skb, dev, 0);
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
-               sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
+               cmd_reg_val |= CMD_AT;
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               cmd_reg_val |= CMD_SRR;
        else
-               sja1000_write_cmdreg(priv, CMD_TR);
+               cmd_reg_val |= CMD_TR;
+
+       sja1000_write_cmdreg(priv, cmd_reg_val);
 
        return NETDEV_TX_OK;
 }
@@ -622,9 +630,12 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
        priv->can.do_set_bittiming = sja1000_set_bittiming;
        priv->can.do_set_mode = sja1000_set_mode;
        priv->can.do_get_berr_counter = sja1000_get_berr_counter;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
-               CAN_CTRLMODE_ONE_SHOT;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+                                      CAN_CTRLMODE_LISTENONLY |
+                                      CAN_CTRLMODE_3_SAMPLES |
+                                      CAN_CTRLMODE_ONE_SHOT |
+                                      CAN_CTRLMODE_BERR_REPORTING |
+                                      CAN_CTRLMODE_PRESUME_ACK;
 
        spin_lock_init(&priv->cmdreg_lock);
 
index ea4d4f1a6411011f837b9da12b94d731a8a960a5..acb5b92ace92da17f55a5d892e90e7d7530a1952 100644 (file)
@@ -529,7 +529,7 @@ static struct slcan *slc_alloc(dev_t line)
                return NULL;
 
        sprintf(name, "slcan%d", i);
-       dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+       dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup);
        if (!dev)
                return NULL;
 
index 29e272cc7a984f79cc3d6815b76deacb18b70abb..64c016a99af80b0bf20114b98193e4544d598630 100644 (file)
@@ -1496,7 +1496,6 @@ e100_set_config(struct net_device *dev, struct ifmap *map)
                case IF_PORT_AUI:
                        spin_unlock(&np->lock);
                        return -EOPNOTSUPP;
-                       break;
                default:
                        printk(KERN_ERR "%s: Invalid media selected", dev->name);
                        spin_unlock(&np->lock);
index 0932ffbf381b5b5c877b9988318912fcea930473..ff435fbd1ad0e8b32a8299211a890b1f5b9deb31 100644 (file)
@@ -164,7 +164,7 @@ static int __init dummy_init_one(void)
        struct net_device *dev_dummy;
        int err;
 
-       dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
+       dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
        if (!dev_dummy)
                return -ENOMEM;
 
index 7a79b60468796a88795af28e655a94acf17f981f..957e5c0cede337cff8d98c81089e879d83988dcb 100644 (file)
@@ -585,7 +585,8 @@ static int __init eql_init_module(void)
 
        pr_info("%s\n", version);
 
-       dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
+       dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
+                              eql_setup);
        if (!dev_eql)
                return -ENOMEM;
 
index 599311f0e05c18eccad98511b3d7d73737dbd1d5..b96e8852b2d195109b92ff6dd41a2b04a02d5389 100644 (file)
@@ -986,7 +986,7 @@ static void ethdev_setup(struct net_device *dev)
 static struct net_device *____alloc_ei_netdev(int size)
 {
        return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
-                               ethdev_setup);
+                           NET_NAME_UNKNOWN, ethdev_setup);
 }
 
 
index 90e825e8abfee9aeb4d743fab9be26ca7dd109ec..65cf60f6718c52fae6ebacdf3e1e4cf5444383d2 100644 (file)
@@ -178,10 +178,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                case NUBUS_DRHW_APPLE_SONIC_LC:
                case NUBUS_DRHW_SONNET:
                        return MAC8390_NONE;
-                       break;
                default:
                        return MAC8390_APPLE;
-                       break;
                }
                break;
 
@@ -189,13 +187,10 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                switch (dev->dr_hw) {
                case NUBUS_DRHW_ASANTE_LC:
                        return MAC8390_NONE;
-                       break;
                case NUBUS_DRHW_CABLETRON:
                        return MAC8390_CABLETRON;
-                       break;
                default:
                        return MAC8390_APPLE;
-                       break;
                }
                break;
 
@@ -220,10 +215,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                switch (dev->dr_hw) {
                case NUBUS_DRHW_INTERLAN:
                        return MAC8390_INTERLAN;
-                       break;
                default:
                        return MAC8390_KINETICS;
-                       break;
                }
                break;
 
@@ -563,7 +556,6 @@ static int __init mac8390_initdev(struct net_device *dev,
                case ACCESS_UNKNOWN:
                        pr_err("Don't know how to access card memory!\n");
                        return -ENODEV;
-                       break;
 
                case ACCESS_16:
                        /* 16 bit card, register map is reversed */
index bbaf36d9f5e1cfbb7649ec13d5eb00595543a5b4..6e314dbba80505e353b01c2f6e0052ca407f8231 100644 (file)
@@ -182,6 +182,8 @@ config AMD_XGBE
        depends on OF_NET
        select PHYLIB
        select AMD_XGBE_PHY
+       select BITREVERSE
+       select CRC32
        ---help---
          This driver supports the AMD 10GbE Ethernet device found on an
          AMD SoC.
index 068dc7cad5fa3c511c34b034c006add6284bb097..841e6558db682757025e6284cd37d12c81e24039 100644 (file)
@@ -101,7 +101,6 @@ Revision History:
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
 MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 module_param_array(speed_duplex, int, NULL, 0);
 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
 module_param_array(coalesce, bool, NULL, 0);
@@ -109,17 +108,9 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
 module_param_array(dynamic_ipg, bool, NULL, 0);
 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
 
-static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
-
-       { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { 0, }
-
-};
-/*
-This function will read the PHY registers.
-*/
-static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
+/* This function will read the PHY registers. */
+static int amd8111e_read_phy(struct amd8111e_priv *lp,
+                            int phy_id, int reg, u32 *val)
 {
        void __iomem *mmio = lp->mmio;
        unsigned int reg_val;
@@ -146,10 +137,9 @@ err_phy_read:
 
 }
 
-/*
-This function will write into PHY registers.
-*/
-static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
+/* This function will write into PHY registers. */
+static int amd8111e_write_phy(struct amd8111e_priv *lp,
+                             int phy_id, int reg, u32 val)
 {
        unsigned int repeat = REPEAT_CNT;
        void __iomem *mmio = lp->mmio;
@@ -176,12 +166,11 @@ err_phy_write:
        return -EINVAL;
 
 }
-/*
-This is the mii register read function provided to the mii interface.
-*/
-static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
+
+/* This is the mii register read function provided to the mii interface. */
+static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
 {
-       struct amd8111e_privlp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        unsigned int reg_val;
 
        amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
@@ -189,19 +178,18 @@ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
 
 }
 
-/*
-This is the mii register write function provided to the mii interface.
-*/
-static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
+/* This is the mii register write function provided to the mii interface. */
+static void amd8111e_mdio_write(struct net_device *dev,
+                               int phy_id, int reg_num, int val)
 {
-       struct amd8111e_privlp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
 
        amd8111e_write_phy(lp, phy_id, reg_num, val);
 }
 
-/*
-This function will set PHY speed. During initialization sets the original speed to 100 full.
-*/
+/* This function will set PHY speed. During initialization sets
+ * the original speed to 100 full
+ */
 static void amd8111e_set_ext_phy(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -240,14 +228,13 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
 
 }
 
-/*
-This function will unmap skb->data space and will free
-all transmit and receive skbuffs.
-*/
+/* This function will unmap skb->data space and will free
+ * all transmit and receive skbuffs.
+ */
 static int amd8111e_free_skbs(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct sk_buffrx_skbuff;
+       struct sk_buff *rx_skbuff;
        int i;
 
        /* Freeing transmit skbs */
@@ -274,18 +261,18 @@ static int amd8111e_free_skbs(struct net_device *dev)
        return 0;
 }
 
-/*
-This will set the receive buffer length corresponding to the mtu size of networkinterface.
-*/
-static inline void amd8111e_set_rx_buff_len(struct net_devicedev)
+/* This will set the receive buffer length corresponding
+ * to the mtu size of networkinterface.
+ */
+static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
 {
-       struct amd8111e_privlp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        unsigned int mtu = dev->mtu;
 
        if (mtu > ETH_DATA_LEN){
                /* MTU + ethernet header + FCS
-               + optional VLAN tag + skb reserve space 2 */
-
+                * + optional VLAN tag + skb reserve space 2
+                */
                lp->rx_buff_len = mtu + ETH_HLEN + 10;
                lp->options |= OPTION_JUMBO_ENABLE;
        } else{
@@ -294,8 +281,10 @@ static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
        }
 }
 
-/*
-This function will free all the previously allocated buffers, determine new receive buffer length  and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
+/* This function will free all the previously allocated buffers,
+ * determine new receive buffer length  and will allocate new receive buffers.
+ * This function also allocates and initializes both the transmitter
+ * and receive hardware descriptors.
  */
 static int amd8111e_init_ring(struct net_device *dev)
 {
@@ -376,15 +365,18 @@ err_free_tx_ring:
 err_no_mem:
        return -ENOMEM;
 }
-/* This function will set the interrupt coalescing according to the input arguments */
-static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
+
+/* This function will set the interrupt coalescing according
+ * to the input arguments
+ */
+static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
 {
        unsigned int timeout;
        unsigned int event_count;
 
        struct amd8111e_priv *lp = netdev_priv(dev);
        void __iomem *mmio = lp->mmio;
-       struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+       struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
 
 
        switch(cmod)
@@ -435,9 +427,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
 
 }
 
-/*
-This function initializes the device registers  and starts the device.
-*/
+/* This function initializes the device registers  and starts the device. */
 static int amd8111e_restart(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -501,8 +491,7 @@ static int amd8111e_restart(struct net_device *dev)
 
        /* Enable interrupt coalesce */
        if(lp->options & OPTION_INTR_COAL_ENABLE){
-               printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
-                                                               dev->name);
+               netdev_info(dev, "Interrupt Coalescing Enabled.\n");
                amd8111e_set_coalesce(dev,ENABLE_COAL);
        }
 
@@ -514,10 +503,9 @@ static int amd8111e_restart(struct net_device *dev)
        readl(mmio+CMD0);
        return 0;
 }
-/*
-This function clears necessary the device registers.
-*/
-static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+
+/* This function clears necessary the device registers. */
+static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
 {
        unsigned int reg_val;
        unsigned int logic_filter[2] ={0,};
@@ -587,7 +575,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
        writew(MIB_CLEAR, mmio + MIB_ADDR);
 
        /* Clear LARF */
-       amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+       amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
 
        /* SRAM_SIZE register */
        reg_val = readl(mmio + SRAM_SIZE);
@@ -605,11 +593,10 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
 
 }
 
-/*
-This function disables the interrupt and clears all the pending
-interrupts in INT0
+/* This function disables the interrupt and clears all the pending
+ * interrupts in INT0
  */
-static void amd8111e_disable_interrupt(struct amd8111e_privlp)
+static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
 {
        u32 intr0;
 
@@ -625,10 +612,8 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
 
 }
 
-/*
-This function stops the chip.
-*/
-static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+/* This function stops the chip. */
+static void amd8111e_stop_chip(struct amd8111e_priv *lp)
 {
        writel(RUN, lp->mmio + CMD0);
 
@@ -636,10 +621,8 @@ static void amd8111e_stop_chip(struct amd8111e_priv* lp)
        readl(lp->mmio + CMD0);
 }
 
-/*
-This function frees the  transmiter and receiver descriptor rings.
-*/
-static void amd8111e_free_ring(struct amd8111e_priv* lp)
+/* This function frees the  transmiter and receiver descriptor rings. */
+static void amd8111e_free_ring(struct amd8111e_priv *lp)
 {
        /* Free transmit and receive descriptor rings */
        if(lp->rx_ring){
@@ -659,12 +642,13 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp)
 
 }
 
-/*
-This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
-*/
+/* This function will free all the transmit skbs that are actually
+ * transmitted by the device. It will check the ownership of the
+ * skb before freeing the skb.
+ */
 static int amd8111e_tx(struct net_device *dev)
 {
-       struct amd8111e_privlp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
        int status;
        /* Complete all the transmit packet */
@@ -724,21 +708,20 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                goto rx_not_empty;
 
        do{
-               /* process receive packets until we use the quota*/
-               /* If we own the next entry, it's a new packet. Send it up. */
+               /* process receive packets until we use the quota.
+                * If we own the next entry, it's a new packet. Send it up.
+                */
                while(1) {
                        status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
                        if (status & OWN_BIT)
                                break;
 
-                       /*
-                        * There is a tricky error noted by John Murphy,
+                       /* There is a tricky error noted by John Murphy,
                         * <murf@perftech.com> to Russ Nelson: Even with
                         * full-sized * buffers it's possible for a
                         * jabber packet to use two buffers, with only
                         * the last correctly noting the error.
                         */
-
                        if(status & ERR_BIT) {
                                /* reseting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
@@ -771,7 +754,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                        new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
                        if (!new_skb) {
                                /* if allocation fail,
-                                  ignore that pkt and go to next one */
+                                * ignore that pkt and go to next one
+                                */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                lp->drv_rx_errors++;
                                goto err_next_pkt;
@@ -812,8 +796,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                        rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
                }
                /* Check the interrupt status register for more packets in the
-                  mean time. Process them since we have not used up our quota.*/
-
+                * mean time. Process them since we have not used up our quota.
+                */
                intr0 = readl(mmio + INT0);
                /*Ack receive packets */
                writel(intr0 & RINT0,mmio + INT0);
@@ -833,10 +817,8 @@ rx_not_empty:
        return num_rx_pkt;
 }
 
-/*
-This function will indicate the link status to the kernel.
-*/
-static int amd8111e_link_change(struct net_device* dev)
+/* This function will indicate the link status to the kernel. */
+static int amd8111e_link_change(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        int status0,speed;
@@ -860,24 +842,26 @@ static int amd8111e_link_change(struct net_device* dev)
                else if(speed == PHY_SPEED_100)
                        lp->link_config.speed = SPEED_100;
 
-               printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",                        dev->name,
-                      (lp->link_config.speed == SPEED_100) ? "100": "10",
-                      (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
+               netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
+                           (lp->link_config.speed == SPEED_100) ?
+                                                       "100" : "10",
+                           (lp->link_config.duplex == DUPLEX_FULL) ?
+                                                       "Full" : "Half");
+
                netif_carrier_on(dev);
        }
        else{
                lp->link_config.speed = SPEED_INVALID;
                lp->link_config.duplex = DUPLEX_INVALID;
                lp->link_config.autoneg = AUTONEG_INVALID;
-               printk(KERN_INFO "%s: Link is Down.\n",dev->name);
+               netdev_info(dev, "Link is Down.\n");
                netif_carrier_off(dev);
        }
 
        return 0;
 }
-/*
-This function reads the mib counters.
-*/
+
+/* This function reads the mib counters. */
 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
 {
        unsigned int  status;
@@ -895,8 +879,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
        return data;
 }
 
-/*
- * This function reads the mib registers and returns the hardware statistics.
+/* This function reads the mib registers and returns the hardware statistics.
  * It updates previous internal driver statistics with new values.
  */
 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
@@ -992,13 +975,14 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
 
        return new_stats;
 }
+
 /* This function recalculate the interrupt coalescing  mode on every interrupt
-according to the datarate and the packet rate.
-*/
+ * according to the datarate and the packet rate.
+ */
 static int amd8111e_calc_coalesce(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+       struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
        int tx_pkt_rate;
        int rx_pkt_rate;
        int tx_data_rate;
@@ -1126,13 +1110,14 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
        return 0;
 
 }
-/*
-This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
-*/
+
+/* This is device interrupt function. It handles transmit,
+ * receive,link change and hardware timer interrupts.
+ */
 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
 {
 
-       struct net_device * dev = (struct net_device *) dev_id;
+       struct net_device *dev = (struct net_device *)dev_id;
        struct amd8111e_priv *lp = netdev_priv(dev);
        void __iomem *mmio = lp->mmio;
        unsigned int intr0, intren0;
@@ -1168,7 +1153,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
                        /* Schedule a polling routine */
                        __napi_schedule(&lp->napi);
                } else if (intren0 & RINTEN0) {
-                       printk("************Driver bug! interrupt while in poll\n");
+                       netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
                        /* Fix by disable receive interrupts */
                        writel(RINTEN0, mmio + INTEN0);
                }
@@ -1205,10 +1190,11 @@ static void amd8111e_poll(struct net_device *dev)
 #endif
 
 
-/*
-This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
-*/
-static int amd8111e_close(struct net_device * dev)
+/* This function closes the network interface and updates
+ * the statistics so that most recent statistics will be
+ * available after the interface is down.
+ */
+static int amd8111e_close(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        netif_stop_queue(dev);
@@ -1238,9 +1224,11 @@ static int amd8111e_close(struct net_device * dev)
        lp->opened = 0;
        return 0;
 }
-/* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device.
-*/
-static int amd8111e_open(struct net_device * dev )
+
+/* This function opens new interface.It requests irq for the device,
+ * initializes the device,buffers and descriptors, and starts the device.
+ */
+static int amd8111e_open(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
 
@@ -1264,7 +1252,7 @@ static int amd8111e_open(struct net_device * dev )
        /* Start ipg timer */
        if(lp->options & OPTION_DYN_IPG_ENABLE){
                add_timer(&lp->ipg_data.ipg_timer);
-               printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+               netdev_info(dev, "Dynamic IPG Enabled\n");
        }
 
        lp->opened = 1;
@@ -1275,10 +1263,11 @@ static int amd8111e_open(struct net_device * dev )
 
        return 0;
 }
-/*
-This function checks if there is any transmit  descriptors available to queue more packet.
-*/
-static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+
+/* This function checks if there is any transmit  descriptors
+ * available to queue more packet.
+ */
+static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
 {
        int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
        if (lp->tx_skbuff[tx_index])
@@ -1287,12 +1276,14 @@ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
                return 0;
 
 }
-/*
-This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
-*/
 
+/* This function will queue the transmit packets to the
+ * descriptors and will trigger the send operation. It also
+ * initializes the transmit descriptors with buffer physical address,
+ * byte count, ownership to hardware etc.
+ */
 static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
-                                      struct net_device * dev)
+                                      struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        int tx_index;
@@ -1338,9 +1329,7 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
        spin_unlock_irqrestore(&lp->lock, flags);
        return NETDEV_TX_OK;
 }
-/*
-This function returns all the memory mapped registers of the device.
-*/
+/* This function returns all the memory mapped registers of the device. */
 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
 {
        void __iomem *mmio = lp->mmio;
@@ -1361,10 +1350,9 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
 }
 
 
-/*
-This function sets promiscuos mode, all-multi mode or the multicast address
-list to the device.
-*/
+/* This function sets promiscuos mode, all-multi mode or the multicast address
+ * list to the device.
+ */
 static void amd8111e_set_multicast_list(struct net_device *dev)
 {
        struct netdev_hw_addr *ha;
@@ -1383,14 +1371,14 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
                /* get all multicast packet */
                mc_filter[1] = mc_filter[0] = 0xffffffff;
                lp->options |= OPTION_MULTICAST_ENABLE;
-               amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+               amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
                return;
        }
        if (netdev_mc_empty(dev)) {
                /* get only own packets */
                mc_filter[1] = mc_filter[0] = 0;
                lp->options &= ~OPTION_MULTICAST_ENABLE;
-               amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+               amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
                /* disable promiscuous mode */
                writel(PROM, lp->mmio + CMD2);
                return;
@@ -1402,14 +1390,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
                bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
                mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
        }
-       amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
+       amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
 
        /* To eliminate PCI posting bug */
        readl(lp->mmio + CMD2);
 
 }
 
-static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
+static void amd8111e_get_drvinfo(struct net_device *dev,
+                                struct ethtool_drvinfo *info)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        struct pci_dev *pci_dev = lp->pci_dev;
@@ -1501,11 +1490,11 @@ static const struct ethtool_ops ops = {
        .set_wol = amd8111e_set_wol,
 };
 
-/*
-This function handles all the  ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
-*/
-
-static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+/* This function handles all the  ethtool ioctls. It gives driver info,
+ * gets/sets driver speed, gets memory mapped register values, forces
+ * auto negotiation, sets/gets WOL options for ethtool application.
+ */
+static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *data = if_mii(ifr);
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1559,9 +1548,9 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-/*
-This function changes the mtu of the device. It restarts the device  to initialize the descriptor with new receive buffers.
-*/
+/* This function changes the mtu of the device. It restarts the device  to
+ * initialize the descriptor with new receive buffers.
+ */
 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1572,7 +1561,8 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
 
        if (!netif_running(dev)) {
                /* new_mtu will be used
-                  when device starts netxt time */
+                * when device starts netxt time
+                */
                dev->mtu = new_mtu;
                return 0;
        }
@@ -1591,7 +1581,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
        return err;
 }
 
-static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
 {
        writel( VAL1|MPPLBA, lp->mmio + CMD3);
        writel( VAL0|MPEN_SW, lp->mmio + CMD7);
@@ -1601,7 +1591,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
        return 0;
 }
 
-static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
 {
 
        /* Adapter is already stoped/suspended/interrupt-disabled */
@@ -1612,19 +1602,18 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
        return 0;
 }
 
-/*
- * This function is called when a packet transmission fails to complete
+/* This function is called when a packet transmission fails to complete
  * within a reasonable period, on the assumption that an interrupt have
  * failed or the interface is locked up. This function will reinitialize
  * the hardware.
  */
 static void amd8111e_tx_timeout(struct net_device *dev)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        int err;
 
-       printk(KERN_ERR "%s: transmit timed out, resetting\n",
-                                                     dev->name);
+       netdev_err(dev, "transmit timed out, resetting\n");
+
        spin_lock_irq(&lp->lock);
        err = amd8111e_restart(dev);
        spin_unlock_irq(&lp->lock);
@@ -1701,22 +1690,10 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
        return 0;
 }
 
-
-static void amd8111e_remove_one(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       if (dev) {
-               unregister_netdev(dev);
-               iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
-               free_netdev(dev);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-       }
-}
-static void amd8111e_config_ipg(struct net_device* dev)
+static void amd8111e_config_ipg(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct ipg_info* ipg_data = &lp->ipg_data;
+       struct ipg_info *ipg_data = &lp->ipg_data;
        void __iomem *mmio = lp->mmio;
        unsigned int prev_col_cnt = ipg_data->col_cnt;
        unsigned int total_col_cnt;
@@ -1814,27 +1791,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 {
        int err, i;
        unsigned long reg_addr,reg_len;
-       struct amd8111e_priv* lp;
-       struct net_device* dev;
+       struct amd8111e_priv *lp;
+       struct net_device *dev;
 
        err = pci_enable_device(pdev);
        if(err){
-               printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
-                       "exiting.\n");
+               dev_err(&pdev->dev, "Cannot enable new PCI device\n");
                return err;
        }
 
        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
-               printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "Cannot find PCI base address\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
 
        err = pci_request_regions(pdev, MODULE_NAME);
        if(err){
-               printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }
 
@@ -1842,16 +1816,14 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        /* Find power-management capability. */
        if (!pdev->pm_cap) {
-               printk(KERN_ERR "amd8111e: No Power Management capability, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "No Power Management capability\n");
                err = -ENODEV;
                goto err_free_reg;
        }
 
        /* Initialize DMA */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
-               printk(KERN_ERR "amd8111e: DMA not supported,"
-                       "exiting.\n");
+               dev_err(&pdev->dev, "DMA not supported\n");
                err = -ENODEV;
                goto err_free_reg;
        }
@@ -1878,10 +1850,9 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        spin_lock_init(&lp->lock);
 
-       lp->mmio = ioremap(reg_addr, reg_len);
+       lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
        if (!lp->mmio) {
-               printk(KERN_ERR "amd8111e: Cannot map device registers, "
-                      "exiting\n");
+               dev_err(&pdev->dev, "Cannot map device registers\n");
                err = -ENOMEM;
                goto err_free_dev;
        }
@@ -1923,9 +1894,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        err = register_netdev(dev);
        if (err) {
-               printk(KERN_ERR "amd8111e: Cannot register net device, "
-                      "exiting.\n");
-               goto err_iounmap;
+               dev_err(&pdev->dev, "Cannot register net device\n");
+               goto err_free_dev;
        }
 
        pci_set_drvdata(pdev, dev);
@@ -1942,21 +1912,17 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
        }
 
        /*  display driver and device information */
-
        chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
-       printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
-              dev->name,MODULE_VERS);
-       printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
-              dev->name, chip_version, dev->dev_addr);
+       dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
+       dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+                chip_version, dev->dev_addr);
        if (lp->ext_phy_id)
-               printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
-                      dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+               dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
+                        lp->ext_phy_id, lp->ext_phy_addr);
        else
-               printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
-                      dev->name);
+               dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
+
        return 0;
-err_iounmap:
-       iounmap(lp->mmio);
 
 err_free_dev:
        free_netdev(dev);
@@ -1970,6 +1936,29 @@ err_disable_pdev:
 
 }
 
+static void amd8111e_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               unregister_netdev(dev);
+               free_netdev(dev);
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+       }
+}
+
+static const struct pci_device_id amd8111e_pci_tbl[] = {
+       {
+        .vendor = PCI_VENDOR_ID_AMD,
+        .device = PCI_DEVICE_ID_AMD8111E_7462,
+       },
+       {
+        .vendor = 0,
+       }
+};
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+
 static struct pci_driver amd8111e_driver = {
        .name           = MODULE_NAME,
        .id_table       = amd8111e_pci_tbl,
index 57397295887c964b23f2bbb7cf7d25d908ab2b29..b584b78237dfdbf9ae50618c2b992459137b2739 100644 (file)
@@ -475,7 +475,7 @@ static void lance_init_ring(struct net_device *dev)
        *lib_ptr(ib, rx_ptr, lp->type) = leptr;
        if (ZERO)
                printk("RX ptr: %8.8x(%8.8x)\n",
-                      leptr, lib_off(brx_ring, lp->type));
+                      leptr, (uint)lib_off(brx_ring, lp->type));
 
        /* Setup tx descriptor pointer */
        leptr = offsetof(struct lance_init_block, btx_ring);
@@ -484,7 +484,7 @@ static void lance_init_ring(struct net_device *dev)
        *lib_ptr(ib, tx_ptr, lp->type) = leptr;
        if (ZERO)
                printk("TX ptr: %8.8x(%8.8x)\n",
-                      leptr, lib_off(btx_ring, lp->type));
+                      leptr, (uint)lib_off(btx_ring, lp->type));
 
        if (ZERO)
                printk("TX rings:\n");
@@ -499,8 +499,8 @@ static void lance_init_ring(struct net_device *dev)
                                                /* The ones required by tmd2 */
                *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n",
-                              i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
+                       printk("%d: %8.8x(%p)\n",
+                              i, leptr, lp->tx_buf_ptr_cpu[i]);
        }
 
        /* Setup the Rx ring entries */
@@ -516,8 +516,8 @@ static void lance_init_ring(struct net_device *dev)
                                                             0xf000;
                *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n",
-                              i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
+                       printk("%d: %8.8x(%p)\n",
+                              i, leptr, lp->rx_buf_ptr_cpu[i]);
        }
        iob();
 }
index bf462ee86f5cdc7236fc3e44413dfaceaae9e38f..7ec80ac7043f7d3d1ce3274758667a2aa9df7bff 100644 (file)
 #define DMA_MR_SWR_WIDTH               1
 #define DMA_SBMR_EAME_INDEX            11
 #define DMA_SBMR_EAME_WIDTH            1
+#define DMA_SBMR_BLEN_256_INDEX                7
+#define DMA_SBMR_BLEN_256_WIDTH                1
 #define DMA_SBMR_UNDEF_INDEX           0
 #define DMA_SBMR_UNDEF_WIDTH           1
 
 #define MAC_PFR                                0x0008
 #define MAC_WTR                                0x000c
 #define MAC_HTR0                       0x0010
-#define MAC_HTR1                       0x0014
-#define MAC_HTR2                       0x0018
-#define MAC_HTR3                       0x001c
-#define MAC_HTR4                       0x0020
-#define MAC_HTR5                       0x0024
-#define MAC_HTR6                       0x0028
-#define MAC_HTR7                       0x002c
 #define MAC_VLANTR                     0x0050
 #define MAC_VLANHTR                    0x0058
 #define MAC_VLANIR                     0x0060
 
 #define MAC_QTFCR_INC                  4
 #define MAC_MACA_INC                   4
+#define MAC_HTR_INC                    4
 
 /* MAC register entry bit positions and sizes */
 #define MAC_HWF0R_ADDMACADRSEL_INDEX   18
 #define MAC_MACA1HR_AE_WIDTH           1
 #define MAC_PFR_HMC_INDEX              2
 #define MAC_PFR_HMC_WIDTH              1
+#define MAC_PFR_HPF_INDEX              10
+#define MAC_PFR_HPF_WIDTH              1
 #define MAC_PFR_HUC_INDEX              1
 #define MAC_PFR_HUC_WIDTH              1
 #define MAC_PFR_PM_INDEX               4
 #define MAC_PFR_PM_WIDTH               1
 #define MAC_PFR_PR_INDEX               0
 #define MAC_PFR_PR_WIDTH               1
+#define MAC_PFR_VTFE_INDEX             16
+#define MAC_PFR_VTFE_WIDTH             1
 #define MAC_PMTCSR_MGKPKTEN_INDEX      1
 #define MAC_PMTCSR_MGKPKTEN_WIDTH      1
 #define MAC_PMTCSR_PWRDWN_INDEX                0
 #define MAC_TCR_SS_WIDTH               2
 #define MAC_TCR_TE_INDEX               0
 #define MAC_TCR_TE_WIDTH               1
+#define MAC_VLANHTR_VLHT_INDEX         0
+#define MAC_VLANHTR_VLHT_WIDTH         16
+#define MAC_VLANIR_VLTI_INDEX          20
+#define MAC_VLANIR_VLTI_WIDTH          1
+#define MAC_VLANIR_CSVL_INDEX          19
+#define MAC_VLANIR_CSVL_WIDTH          1
 #define MAC_VLANTR_DOVLTC_INDEX                20
 #define MAC_VLANTR_DOVLTC_WIDTH                1
 #define MAC_VLANTR_ERSVLM_INDEX                19
 #define MAC_VLANTR_ERSVLM_WIDTH                1
 #define MAC_VLANTR_ESVL_INDEX          18
 #define MAC_VLANTR_ESVL_WIDTH          1
+#define MAC_VLANTR_ETV_INDEX           16
+#define MAC_VLANTR_ETV_WIDTH           1
 #define MAC_VLANTR_EVLS_INDEX          21
 #define MAC_VLANTR_EVLS_WIDTH          2
 #define MAC_VLANTR_EVLRXS_INDEX                24
 #define MAC_VLANTR_EVLRXS_WIDTH                1
+#define MAC_VLANTR_VL_INDEX            0
+#define MAC_VLANTR_VL_WIDTH            16
+#define MAC_VLANTR_VTHM_INDEX          25
+#define MAC_VLANTR_VTHM_WIDTH          1
+#define MAC_VLANTR_VTIM_INDEX          17
+#define MAC_VLANTR_VTIM_WIDTH          1
 #define MAC_VR_DEVID_INDEX             8
 #define MAC_VR_DEVID_WIDTH             8
 #define MAC_VR_SNPSVER_INDEX           0
index 6bb76d5c817b593f7a34a11032d2579c86b2936c..346592dca33ce98dd94e24f33f99eabb373f60cb 100644 (file)
@@ -151,7 +151,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
 {
        char workarea[32];
        ssize_t len;
-       unsigned int scan_value;
+       int ret;
 
        if (*ppos != 0)
                return 0;
@@ -165,9 +165,8 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
                return len;
 
        workarea[len] = '\0';
-       if (sscanf(workarea, "%x", &scan_value) == 1)
-               *value = scan_value;
-       else
+       ret = kstrtouint(workarea, 16, value);
+       if (ret)
                return -EIO;
 
        return len;
index 6f1c85956d504ae7e61fb92dd111bc3863d67b9a..a9ce56d5e988b7caf0c2f926b59481cebe622556 100644 (file)
@@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 
        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
-                       rdata = GET_DESC_DATA(ring, i);
+                       rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_skb(pdata, rdata);
                }
 
@@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
                rdesc_dma = ring->rdesc_dma;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
 
                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;
@@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
                rdesc_dma = ring->rdesc_dma;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
 
                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;
@@ -392,7 +392,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
-       rdata = GET_DESC_DATA(ring, cur_index);
+       rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
        if (tso) {
                DBGPR("  TSO packet\n");
@@ -413,12 +413,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                packet->length += packet->header_len;
 
                cur_index++;
-               rdata = GET_DESC_DATA(ring, cur_index);
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }
 
        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
-               len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+               len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
 
                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
@@ -437,7 +437,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                packet->length += len;
 
                cur_index++;
-               rdata = GET_DESC_DATA(ring, cur_index);
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -447,7 +447,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                offset = 0;
 
                for (datalen = skb_frag_size(frag); datalen; ) {
-                       len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+                       len = min_t(unsigned int, datalen,
+                                   XGBE_TX_MAX_BUF_SIZE);
 
                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
@@ -468,7 +469,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                        packet->length += len;
 
                        cur_index++;
-                       rdata = GET_DESC_DATA(ring, cur_index);
+                       rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }
 
@@ -484,7 +485,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 
 err_out:
        while (start_index < cur_index) {
-               rdata = GET_DESC_DATA(ring, start_index++);
+               rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_skb(pdata, rdata);
        }
 
@@ -507,7 +508,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
              ring->rx.realloc_index);
 
        for (i = 0; i < ring->dirty; i++) {
-               rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
                /* Reset rdata values */
                xgbe_unmap_skb(pdata, rdata);
index 002293b0819d19160b5ce87e7be03cee0d8aa745..699cff5d3184cfa7f309e6066a1fcf03d02f0857 100644 (file)
 
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -484,7 +486,7 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
 
                /* No MTL interrupts to be enabled */
-               XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+               XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
 }
 
@@ -547,24 +549,16 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
        return 0;
 }
 
-static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
-                                  unsigned int am_mode)
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+                            struct netdev_hw_addr *ha, unsigned int *mac_reg)
 {
-       struct netdev_hw_addr *ha;
-       unsigned int mac_reg;
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;
-       unsigned int i;
 
-       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
-       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
+       mac_addr_lo = 0;
+       mac_addr_hi = 0;
 
-       i = 0;
-       mac_reg = MAC_MACA1HR;
-
-       netdev_for_each_uc_addr(ha, pdata->netdev) {
-               mac_addr_lo = 0;
-               mac_addr_hi = 0;
+       if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
@@ -574,54 +568,93 @@ static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];
 
-               DBGPR("  adding unicast address %pM at 0x%04x\n",
-                     ha->addr, mac_reg);
+               DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
+                     *mac_reg);
 
                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+       }
 
-               XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-               mac_reg += MAC_MACA_INC;
-               XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-               mac_reg += MAC_MACA_INC;
+       XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+       *mac_reg += MAC_MACA_INC;
+       XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+       *mac_reg += MAC_MACA_INC;
+}
 
-               i++;
-       }
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int mac_reg;
+       unsigned int addn_macs;
 
-       if (!am_mode) {
-               netdev_for_each_mc_addr(ha, pdata->netdev) {
-                       mac_addr_lo = 0;
-                       mac_addr_hi = 0;
-                       mac_addr = (u8 *)&mac_addr_lo;
-                       mac_addr[0] = ha->addr[0];
-                       mac_addr[1] = ha->addr[1];
-                       mac_addr[2] = ha->addr[2];
-                       mac_addr[3] = ha->addr[3];
-                       mac_addr = (u8 *)&mac_addr_hi;
-                       mac_addr[0] = ha->addr[4];
-                       mac_addr[1] = ha->addr[5];
-
-                       DBGPR("  adding multicast address %pM at 0x%04x\n",
-                             ha->addr, mac_reg);
-
-                       XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
-
-                       XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-                       mac_reg += MAC_MACA_INC;
-                       XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-                       mac_reg += MAC_MACA_INC;
-
-                       i++;
+       mac_reg = MAC_MACA1HR;
+       addn_macs = pdata->hw_feat.addn_mac;
+
+       if (netdev_uc_count(netdev) > addn_macs) {
+               xgbe_set_promiscuous_mode(pdata, 1);
+       } else {
+               netdev_for_each_uc_addr(ha, netdev) {
+                       xgbe_set_mac_reg(pdata, ha, &mac_reg);
+                       addn_macs--;
+               }
+
+               if (netdev_mc_count(netdev) > addn_macs) {
+                       xgbe_set_all_multicast_mode(pdata, 1);
+               } else {
+                       netdev_for_each_mc_addr(ha, netdev) {
+                               xgbe_set_mac_reg(pdata, ha, &mac_reg);
+                               addn_macs--;
+                       }
                }
        }
 
        /* Clear remaining additional MAC address entries */
-       for (; i < pdata->hw_feat.addn_mac; i++) {
-               XGMAC_IOWRITE(pdata, mac_reg, 0);
-               mac_reg += MAC_MACA_INC;
-               XGMAC_IOWRITE(pdata, mac_reg, 0);
-               mac_reg += MAC_MACA_INC;
+       while (addn_macs--)
+               xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int hash_reg;
+       unsigned int hash_table_shift, hash_table_count;
+       u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+       u32 crc;
+       unsigned int i;
+
+       hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+       hash_table_count = pdata->hw_feat.hash_table_size / 32;
+       memset(hash_table, 0, sizeof(hash_table));
+
+       /* Build the MAC Hash Table register values */
+       netdev_for_each_uc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+       }
+
+       netdev_for_each_mc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }
 
+       /* Set the MAC Hash Table registers */
+       hash_reg = MAC_HTR0;
+       for (i = 0; i < hash_table_count; i++) {
+               XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+               hash_reg += MAC_HTR_INC;
+       }
+}
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+       if (pdata->hw_feat.hash_table_size)
+               xgbe_set_mac_hash_table(pdata);
+       else
+               xgbe_set_mac_addn_addrs(pdata);
+
        return 0;
 }
 
@@ -738,6 +771,89 @@ static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
        return 0;
 }
 
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+       /* Enable VLAN filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+       /* Enable VLAN Hash Table filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+       /* Disable VLAN tag inverse matching */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+       /* Only filter on the lower 12-bits of the VLAN tag */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+       /* In order for the VLAN Hash Table filtering to be effective,
+        * the VLAN tag identifier in the VLAN Tag Register must not
+        * be zero.  Set the VLAN tag identifier to "1" to enable the
+        * VLAN Hash Table filtering.  This implies that a VLAN tag of
+        * 1 will always pass filtering.
+        */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+       return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+       /* Disable VLAN filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+       return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+       u32 poly = CRCPOLY_LE;
+       u32 crc = ~0;
+       u32 temp = 0;
+       unsigned char *data = (unsigned char *)&vid_le;
+       unsigned char data_byte = 0;
+       int i, bits;
+
+       bits = get_bitmask_order(VLAN_VID_MASK);
+       for (i = 0; i < bits; i++) {
+               if ((i % 8) == 0)
+                       data_byte = data[i / 8];
+
+               temp = ((crc & 1) ^ data_byte) & 1;
+               crc >>= 1;
+               data_byte >>= 1;
+
+               if (temp)
+                       crc ^= poly;
+       }
+
+       return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+       u32 crc;
+       u16 vid;
+       __le16 vid_le;
+       u16 vlan_hash_table = 0;
+
+       /* Generate the VLAN Hash Table value */
+       for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+               /* Get the CRC32 value of the VLAN ID */
+               vid_le = cpu_to_le16(vid);
+               crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+               vlan_hash_table |= (1 << crc);
+       }
+
+       /* Set the VLAN Hash Table filtering register */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+       return 0;
+}
+
 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 {
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -766,7 +882,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 
        /* Initialze all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
-               rdata = GET_DESC_DATA(ring, i);
+               rdata = XGBE_GET_DESC_DATA(ring, i);
                rdesc = rdata->rdesc;
 
                /* Initialize Tx descriptor
@@ -791,7 +907,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
        /* Update the starting address of descriptor ring */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
@@ -848,7 +964,7 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 
        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
-               rdata = GET_DESC_DATA(ring, i);
+               rdata = XGBE_GET_DESC_DATA(ring, i);
                rdesc = rdata->rdesc;
 
                /* Initialize Rx descriptor
@@ -882,14 +998,14 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
 
        /* Update the starting address of descriptor ring */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
        /* Update the Rx Descriptor Tail Pointer */
-       rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
@@ -933,7 +1049,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        if (tx_coalesce && !channel->tx_timer_active)
                ring->coalesce_count = 0;
 
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;
 
        /* Create a context descriptor if this is a TSO packet */
@@ -977,7 +1093,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
                }
 
                ring->cur++;
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;
        }
 
@@ -1034,7 +1150,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 
        for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
                ring->cur++;
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;
 
                /* Update buffer address */
@@ -1074,7 +1190,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        wmb();
 
        /* Set OWN bit for the first descriptor */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
@@ -1088,7 +1204,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor */
        ring->cur++;
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
@@ -1113,11 +1229,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
+       struct net_device *netdev = channel->pdata->netdev;
        unsigned int err, etlt;
 
        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
 
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;
 
        /* Check for data availability */
@@ -1153,7 +1270,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        DBGPR("  err=%u, etlt=%#x\n", err, etlt);
 
        if (!err || (err && !etlt)) {
-               if (etlt == 0x09) {
+               if ((etlt == 0x09) &&
+                   (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       VLAN_CTAG, 1);
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
@@ -1188,56 +1306,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
 }
 
-static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
-                                      enum xgbe_int_state int_state)
+static int xgbe_enable_int(struct xgbe_channel *channel,
+                          enum xgbe_int int_id)
 {
        unsigned int dma_ch_ier;
 
-       if (int_state == XGMAC_INT_STATE_SAVE) {
-               channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               channel->saved_ier &= DMA_INTERRUPT_MASK;
-       } else {
-               dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               dma_ch_ier |= channel->saved_ier;
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
-       }
-}
+       dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
 
-static int xgbe_enable_int(struct xgbe_channel *channel,
-                          enum xgbe_int int_id)
-{
        switch (int_id) {
-       case XGMAC_INT_DMA_ISR_DC0IS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
-               break;
        case XGMAC_INT_DMA_CH_SR_TI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+               break;
+       case XGMAC_INT_DMA_CH_SR_TI_RI:
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
                break;
        case XGMAC_INT_DMA_ALL:
-               xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+               dma_ch_ier |= channel->saved_ier;
                break;
        default:
                return -1;
        }
 
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
        return 0;
 }
 
@@ -1246,42 +1356,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
 {
        unsigned int dma_ch_ier;
 
+       dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
        switch (int_id) {
-       case XGMAC_INT_DMA_ISR_DC0IS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
-               break;
        case XGMAC_INT_DMA_CH_SR_TI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+               break;
+       case XGMAC_INT_DMA_CH_SR_TI_RI:
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
                break;
        case XGMAC_INT_DMA_ALL:
-               xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
-
-               dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               dma_ch_ier &= ~DMA_INTERRUPT_MASK;
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+               channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+               dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
                break;
        default:
                return -1;
        }
 
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
        return 0;
 }
 
@@ -1335,6 +1447,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
 
        /* Set the System Bus mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+       XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
 }
 
 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
@@ -1342,23 +1455,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
        unsigned int arcache, awcache;
 
        arcache = 0;
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
 
        awcache = 0;
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
 }
 
@@ -1388,66 +1501,66 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
        /* Calculate Tx/Rx fifo share per queue */
        switch (fifo_size) {
        case 0:
-               q_fifo_size = FIFO_SIZE_B(128);
+               q_fifo_size = XGBE_FIFO_SIZE_B(128);
                break;
        case 1:
-               q_fifo_size = FIFO_SIZE_B(256);
+               q_fifo_size = XGBE_FIFO_SIZE_B(256);
                break;
        case 2:
-               q_fifo_size = FIFO_SIZE_B(512);
+               q_fifo_size = XGBE_FIFO_SIZE_B(512);
                break;
        case 3:
-               q_fifo_size = FIFO_SIZE_KB(1);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(1);
                break;
        case 4:
-               q_fifo_size = FIFO_SIZE_KB(2);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(2);
                break;
        case 5:
-               q_fifo_size = FIFO_SIZE_KB(4);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(4);
                break;
        case 6:
-               q_fifo_size = FIFO_SIZE_KB(8);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(8);
                break;
        case 7:
-               q_fifo_size = FIFO_SIZE_KB(16);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(16);
                break;
        case 8:
-               q_fifo_size = FIFO_SIZE_KB(32);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(32);
                break;
        case 9:
-               q_fifo_size = FIFO_SIZE_KB(64);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(64);
                break;
        case 10:
-               q_fifo_size = FIFO_SIZE_KB(128);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(128);
                break;
        case 11:
-               q_fifo_size = FIFO_SIZE_KB(256);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }
        q_fifo_size = q_fifo_size / queue_count;
 
        /* Set the queue fifo size programmable value */
-       if (q_fifo_size >= FIFO_SIZE_KB(256))
+       if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(128))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
                p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(64))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
                p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(32))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
                p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(16))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
                p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(8))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
                p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(4))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
                p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(2))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
                p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(1))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
                p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-       else if (q_fifo_size >= FIFO_SIZE_B(512))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
                p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-       else if (q_fifo_size >= FIFO_SIZE_B(256))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256;
 
        return p_fifo;
@@ -1520,6 +1633,13 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
 {
        xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+       /* Filtering is done using perfect filtering and hash filtering */
+       if (pdata->hw_feat.hash_table_size) {
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+       }
 }
 
 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
@@ -1541,6 +1661,18 @@ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
 
 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
 {
+       /* Indicate that VLAN Tx CTAGs come from context descriptors */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+       /* Set the current VLAN Hash Table register value */
+       xgbe_update_vlan_hash_table(pdata);
+
+       if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               xgbe_enable_rx_vlan_filtering(pdata);
+       else
+               xgbe_disable_rx_vlan_filtering(pdata);
+
        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                xgbe_enable_rx_vlan_stripping(pdata);
        else
@@ -2104,7 +2236,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
        hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
        hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-       hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+       hw_if->add_mac_addresses = xgbe_add_mac_addresses;
        hw_if->set_mac_address = xgbe_set_mac_address;
 
        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
@@ -2112,6 +2244,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+       hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+       hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+       hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
 
        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
        hw_if->write_mmd_regs = xgbe_write_mmd_regs;
index cfe3d93b5f52a1f944ec49c67a72daf5fad8cfb5..344e6b19ec0e1b5eb5f518d88c17e71bbe945f28 100644 (file)
@@ -144,9 +144,10 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
        }
 
        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-       if (rx_buf_size < RX_MIN_BUF_SIZE)
-               rx_buf_size = RX_MIN_BUF_SIZE;
-       rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+       if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
+               rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+       rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+                     ~(XGBE_RX_BUF_ALIGN - 1);
 
        return rx_buf_size;
 }
@@ -155,16 +156,21 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
+       enum xgbe_int int_id;
        unsigned int i;
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring)
-                       hw_if->enable_int(channel,
-                                         XGMAC_INT_DMA_CH_SR_TI);
-               if (channel->rx_ring)
-                       hw_if->enable_int(channel,
-                                         XGMAC_INT_DMA_CH_SR_RI);
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_if->enable_int(channel, int_id);
        }
 }
 
@@ -172,16 +178,21 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
+       enum xgbe_int int_id;
        unsigned int i;
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring)
-                       hw_if->disable_int(channel,
-                                          XGMAC_INT_DMA_CH_SR_TI);
-               if (channel->rx_ring)
-                       hw_if->disable_int(channel,
-                                          XGMAC_INT_DMA_CH_SR_RI);
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_if->disable_int(channel, int_id);
        }
 }
 
@@ -377,6 +388,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
 
+       /* Translate the Hash Table size into actual number */
+       switch (hw_feat->hash_table_size) {
+       case 0:
+               break;
+       case 1:
+               hw_feat->hash_table_size = 64;
+               break;
+       case 2:
+               hw_feat->hash_table_size = 128;
+               break;
+       case 3:
+               hw_feat->hash_table_size = 256;
+               break;
+       }
+
        /* The Queue and Channel counts are zero based so increment them
         * to get the actual number
         */
@@ -396,9 +422,12 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
        napi_enable(&pdata->napi);
 }
 
-static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
        napi_disable(&pdata->napi);
+
+       if (del)
+               netif_napi_del(&pdata->napi);
 }
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -446,7 +475,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
                        break;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_skb(pdata, rdata);
                }
        }
@@ -471,7 +500,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
                        break;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_skb(pdata, rdata);
                }
        }
@@ -502,7 +531,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
                netif_device_detach(netdev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata);
+       xgbe_napi_disable(pdata, 0);
 
        /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
@@ -591,7 +620,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        phy_stop(pdata->phydev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata);
+       xgbe_napi_disable(pdata, 1);
 
        xgbe_stop_tx_timers(pdata);
 
@@ -726,14 +755,14 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
 
        for (len = skb_headlen(skb); len;) {
                packet->rdesc_count++;
-               len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+               len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                for (len = skb_frag_size(frag); len; ) {
                        packet->rdesc_count++;
-                       len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+                       len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
                }
        }
 }
@@ -911,18 +940,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
 
-       if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
-               pr_mode = 1;
-       if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
-               am_mode = 1;
-       if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
-            pdata->hw_feat.addn_mac)
-               pr_mode = 1;
-
        hw_if->set_promiscuous_mode(pdata, pr_mode);
        hw_if->set_all_multicast_mode(pdata, am_mode);
-       if (!pr_mode)
-               hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+       hw_if->add_mac_addresses(pdata);
 
        DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -999,6 +1020,38 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
        return s;
 }
 
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+                               u16 vid)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+       DBGPR("-->%s\n", __func__);
+
+       set_bit(vid, pdata->active_vlans);
+       hw_if->update_vlan_hash_table(pdata);
+
+       DBGPR("<--%s\n", __func__);
+
+       return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+                                u16 vid)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+       DBGPR("-->%s\n", __func__);
+
+       clear_bit(vid, pdata->active_vlans);
+       hw_if->update_vlan_hash_table(pdata);
+
+       DBGPR("<--%s\n", __func__);
+
+       return 0;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xgbe_poll_controller(struct net_device *netdev)
 {
@@ -1021,26 +1074,26 @@ static int xgbe_set_features(struct net_device *netdev,
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int rxcsum_enabled, rxvlan_enabled;
+       unsigned int rxcsum, rxvlan, rxvlan_filter;
 
-       rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
-       rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+       rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+       rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+       rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
-       if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+       if ((features & NETIF_F_RXCSUM) && !rxcsum)
                hw_if->enable_rx_csum(pdata);
-               netdev_alert(netdev, "state change - rxcsum enabled\n");
-       } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+       else if (!(features & NETIF_F_RXCSUM) && rxcsum)
                hw_if->disable_rx_csum(pdata);
-               netdev_alert(netdev, "state change - rxcsum disabled\n");
-       }
 
-       if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+       if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
                hw_if->enable_rx_vlan_stripping(pdata);
-               netdev_alert(netdev, "state change - rxvlan enabled\n");
-       } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
                hw_if->disable_rx_vlan_stripping(pdata);
-               netdev_alert(netdev, "state change - rxvlan disabled\n");
-       }
+
+       if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+               hw_if->enable_rx_vlan_filtering(pdata);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+               hw_if->disable_rx_vlan_filtering(pdata);
 
        pdata->netdev_features = features;
 
@@ -1058,6 +1111,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = xgbe_change_mtu,
        .ndo_get_stats64        = xgbe_get_stats64,
+       .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = xgbe_poll_controller,
 #endif
@@ -1069,6 +1124,22 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
        return (struct net_device_ops *)&xgbe_netdev_ops;
 }
 
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+       struct xgbe_prv_data *pdata = channel->pdata;
+       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_ring *ring = channel->rx_ring;
+       struct xgbe_ring_data *rdata;
+
+       desc_if->realloc_skb(channel);
+
+       /* Update the Rx Tail Pointer Register with address of
+        * the last cleaned entry */
+       rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+                         lower_32_bits(rdata->rdesc_dma));
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
@@ -1089,8 +1160,9 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
        spin_lock_irqsave(&ring->lock, flags);
 
-       while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
-               rdata = GET_DESC_DATA(ring, ring->dirty);
+       while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+              (ring->dirty < ring->cur)) {
+               rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
                rdesc = rdata->rdesc;
 
                if (!hw_if->tx_complete(rdesc))
@@ -1109,7 +1181,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        }
 
        if ((ring->tx.queue_stopped == 1) &&
-           (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+           (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
                ring->tx.queue_stopped = 0;
                netif_wake_subqueue(netdev, channel->queue_index);
        }
@@ -1125,7 +1197,6 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
@@ -1152,7 +1223,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
                cur_len = 0;
 
 read_again:
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+                       xgbe_rx_refresh(channel);
+
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
 
                if (hw_if->dev_read(channel))
                        break;
@@ -1239,16 +1313,6 @@ read_again:
                napi_gro_receive(&pdata->napi, skb);
        }
 
-       if (received) {
-               desc_if->realloc_skb(channel);
-
-               /* Update the Rx Tail Pointer Register with address of
-                * the last cleaned entry */
-               rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
-                                 lower_32_bits(rdata->rdesc_dma));
-       }
-
        DBGPR("<--xgbe_rx_poll: received = %d\n", received);
 
        return received;
@@ -1259,21 +1323,28 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
        struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
                                                   napi);
        struct xgbe_channel *channel;
-       int processed;
+       int ring_budget;
+       int processed, last_processed;
        unsigned int i;
 
        DBGPR("-->xgbe_poll: budget=%d\n", budget);
 
-       /* Cleanup Tx ring first */
-       channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++)
-               xgbe_tx_poll(channel);
-
-       /* Process Rx ring next */
        processed = 0;
-       channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++)
-               processed += xgbe_rx_poll(channel, budget - processed);
+       ring_budget = budget / pdata->rx_ring_count;
+       do {
+               last_processed = processed;
+
+               channel = pdata->channel;
+               for (i = 0; i < pdata->channel_count; i++, channel++) {
+                       /* Cleanup Tx ring first */
+                       xgbe_tx_poll(channel);
+
+                       /* Process Rx ring next */
+                       if (ring_budget > (budget - processed))
+                               ring_budget = budget - processed;
+                       processed += xgbe_rx_poll(channel, ring_budget);
+               }
+       } while ((processed < budget) && (processed != last_processed));
 
        /* If we processed everything, we are done */
        if (processed < budget) {
@@ -1296,7 +1367,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
        struct xgbe_ring_desc *rdesc;
 
        while (count--) {
-               rdata = GET_DESC_DATA(ring, idx);
+               rdata = XGBE_GET_DESC_DATA(ring, idx);
                rdesc = rdata->rdesc;
                DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
                      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
index 8909f2b51af1599991e680e298a3a3872d945a41..f7405261f23e9462090eed314f864372efc72208 100644 (file)
@@ -331,16 +331,6 @@ static int xgbe_set_settings(struct net_device *netdev,
             (cmd->duplex != DUPLEX_FULL)))
                goto unlock;
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               /* Clear settings needed to force speeds */
-               phydev->supported &= ~SUPPORTED_1000baseT_Full;
-               phydev->supported &= ~SUPPORTED_10000baseT_Full;
-       } else {
-               /* Add settings needed to force speed */
-               phydev->supported |= SUPPORTED_1000baseT_Full;
-               phydev->supported |= SUPPORTED_10000baseT_Full;
-       }
-
        cmd->advertising &= phydev->supported;
        if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
                goto unlock;
index c83584a26713e6633675a9e82a9ab251d9dce1c0..d5a4f76e94745f38f4afb92489f8debab9b64504 100644 (file)
@@ -247,16 +247,16 @@ static int xgbe_probe(struct platform_device *pdev)
        mutex_init(&pdata->xpcs_mutex);
 
        /* Set and validate the number of descriptors for a ring */
-       BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
-       pdata->tx_desc_count = TX_DESC_CNT;
+       BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+       pdata->tx_desc_count = XGBE_TX_DESC_CNT;
        if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
                dev_err(dev, "tx descriptor count (%d) is not valid\n",
                        pdata->tx_desc_count);
                ret = -EINVAL;
                goto err_io;
        }
-       BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
-       pdata->rx_desc_count = RX_DESC_CNT;
+       BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+       pdata->rx_desc_count = XGBE_RX_DESC_CNT;
        if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
                dev_err(dev, "rx descriptor count (%d) is not valid\n",
                        pdata->rx_desc_count);
@@ -297,6 +297,16 @@ static int xgbe_probe(struct platform_device *pdev)
        *(dev->dma_mask) = DMA_BIT_MASK(40);
        dev->coherent_dma_mask = DMA_BIT_MASK(40);
 
+       if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+               pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+               pdata->arcache = XGBE_DMA_OS_ARCACHE;
+               pdata->awcache = XGBE_DMA_OS_AWCACHE;
+       } else {
+               pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+               pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+               pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+       }
+
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "platform_get_irq failed\n");
@@ -385,7 +395,8 @@ static int xgbe_probe(struct platform_device *pdev)
                              NETIF_F_TSO6 |
                              NETIF_F_GRO |
                              NETIF_F_HW_VLAN_CTAG_RX |
-                             NETIF_F_HW_VLAN_CTAG_TX;
+                             NETIF_F_HW_VLAN_CTAG_TX |
+                             NETIF_F_HW_VLAN_CTAG_FILTER;
 
        netdev->vlan_features |= NETIF_F_SG |
                                 NETIF_F_IP_CSUM |
@@ -396,6 +407,8 @@ static int xgbe_probe(struct platform_device *pdev)
        netdev->features |= netdev->hw_features;
        pdata->netdev_features = netdev->features;
 
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+
        xgbe_init_rx_coalesce(pdata);
        xgbe_init_tx_coalesce(pdata);
 
index ea7a5d6750eab054104a546584d3c3d487dafe07..225f22d5fe0aebbea16379c2c6ba69b45b3eb7cb 100644 (file)
@@ -375,10 +375,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
 
        phydev->autoneg = pdata->default_autoneg;
        if (phydev->autoneg == AUTONEG_DISABLE) {
-               /* Add settings needed to force speed */
-               phydev->supported |= SUPPORTED_1000baseT_Full;
-               phydev->supported |= SUPPORTED_10000baseT_Full;
-
                phydev->speed = pdata->default_speed;
                phydev->duplex = DUPLEX_FULL;
 
index ab0627162c01cd11317d4f34c89b50a08d10b99f..9e24b296e272eefae3c485e1a19170ff2989fe76 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
 #include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
 
 
 #define XGBE_DRV_NAME          "amd-xgbe"
 #define XGBE_DRV_DESC          "AMD 10 Gigabit Ethernet Driver"
 
 /* Descriptor related defines */
-#define TX_DESC_CNT            512
-#define TX_DESC_MIN_FREE       (TX_DESC_CNT >> 3)
-#define TX_DESC_MAX_PROC       (TX_DESC_CNT >> 1)
-#define RX_DESC_CNT            512
+#define XGBE_TX_DESC_CNT       512
+#define XGBE_TX_DESC_MIN_FREE  (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC  (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT       512
 
-#define TX_MAX_BUF_SIZE                (0x3fff & ~(64 - 1))
+#define XGBE_TX_MAX_BUF_SIZE   (0x3fff & ~(64 - 1))
 
-#define RX_MIN_BUF_SIZE                (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define RX_BUF_ALIGN           64
+#define XGBE_RX_MIN_BUF_SIZE   (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN      64
 
 #define XGBE_MAX_DMA_CHANNELS  16
-#define DMA_ARDOMAIN_SETTING   0x2
-#define DMA_ARCACHE_SETTING    0xb
-#define DMA_AWDOMAIN_SETTING   0x2
-#define DMA_AWCACHE_SETTING    0x7
-#define DMA_INTERRUPT_MASK     0x31c7
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN   0x2
+#define XGBE_DMA_OS_ARCACHE    0xb
+#define XGBE_DMA_OS_AWCACHE    0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN  0x3
+#define XGBE_DMA_SYS_ARCACHE   0x0
+#define XGBE_DMA_SYS_AWCACHE   0x0
+
+#define XGBE_DMA_INTERRUPT_MASK        0x31c7
 
 #define XGMAC_MIN_PACKET       60
 #define XGMAC_STD_PACKET_MTU   1500
 #define XGMAC_JUMBO_PACKET_MTU 9000
 #define XGMAC_MAX_JUMBO_PACKET 9018
 
-#define MAX_MULTICAST_LIST     14
-#define TX_FLAGS_IP_PKT                0x00000001
-#define TX_FLAGS_TCP_PKT       0x00000002
-
 /* MDIO bus phy name */
 #define XGBE_PHY_NAME          "amd_xgbe_phy"
 #define XGBE_PRTAD             0
 #define XGMAC_DRIVER_CONTEXT   1
 #define XGMAC_IOCTL_CONTEXT    2
 
-#define FIFO_SIZE_B(x)         (x)
-#define FIFO_SIZE_KB(x)                (x * 1024)
+#define XGBE_FIFO_SIZE_B(x)    (x)
+#define XGBE_FIFO_SIZE_KB(x)   (x * 1024)
 
 #define XGBE_TC_CNT            2
 
 /* Helper macro for descriptor handling
- *  Always use GET_DESC_DATA to access the descriptor data
+ *  Always use XGBE_GET_DESC_DATA to access the descriptor data
  *  since the index is free-running and needs to be and-ed
  *  with the descriptor count value of the ring to index to
  *  the proper descriptor data.
  */
-#define GET_DESC_DATA(_ring, _idx)                             \
+#define XGBE_GET_DESC_DATA(_ring, _idx)                                \
        ((_ring)->rdata +                                       \
         ((_idx) & ((_ring)->rdesc_count - 1)))
 
 
 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS                100
-#define XGMAC_INIT_DMA_TX_FRAMES       16
+#define XGMAC_INIT_DMA_TX_USECS                50
+#define XGMAC_INIT_DMA_TX_FRAMES       25
 
 #define XGMAC_MAX_DMA_RIWT             0xff
-#define XGMAC_INIT_DMA_RX_USECS                100
-#define XGMAC_INIT_DMA_RX_FRAMES       16
+#define XGMAC_INIT_DMA_RX_USECS                30
+#define XGMAC_INIT_DMA_RX_FRAMES       25
 
 /* Flow control queue count */
 #define XGMAC_MAX_FLOW_CONTROL_QUEUES  8
 
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE       8
 
 struct xgbe_prv_data;
 
@@ -219,7 +226,7 @@ struct xgbe_ring_desc {
 
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always use
- * use the GET_DESC_DATA macro to access this data from the ring)
+ * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
  */
 struct xgbe_ring_data {
        struct xgbe_ring_desc *rdesc;   /* Virtual address of descriptor */
@@ -250,7 +257,7 @@ struct xgbe_ring {
        unsigned int rdesc_count;
 
        /* Array of descriptor data corresponding the descriptor memory
-        * (always use the GET_DESC_DATA macro to access this data)
+        * (always use the XGBE_GET_DESC_DATA macro to access this data)
         */
        struct xgbe_ring_data *rdata;
 
@@ -304,13 +311,13 @@ struct xgbe_channel {
 } ____cacheline_aligned;
 
 enum xgbe_int {
-       XGMAC_INT_DMA_ISR_DC0IS,
        XGMAC_INT_DMA_CH_SR_TI,
        XGMAC_INT_DMA_CH_SR_TPS,
        XGMAC_INT_DMA_CH_SR_TBU,
        XGMAC_INT_DMA_CH_SR_RI,
        XGMAC_INT_DMA_CH_SR_RBU,
        XGMAC_INT_DMA_CH_SR_RPS,
+       XGMAC_INT_DMA_CH_SR_TI_RI,
        XGMAC_INT_DMA_CH_SR_FBE,
        XGMAC_INT_DMA_ALL,
 };
@@ -386,7 +393,7 @@ struct xgbe_hw_if {
 
        int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
        int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-       int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+       int (*add_mac_addresses)(struct xgbe_prv_data *);
        int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
 
        int (*enable_rx_csum)(struct xgbe_prv_data *);
@@ -394,6 +401,9 @@ struct xgbe_hw_if {
 
        int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
        int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+       int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+       int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+       int (*update_vlan_hash_table)(struct xgbe_prv_data *);
 
        int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
        void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
@@ -530,6 +540,11 @@ struct xgbe_prv_data {
        struct xgbe_hw_if hw_if;
        struct xgbe_desc_if desc_if;
 
+       /* AXI DMA settings */
+       unsigned int axdomain;
+       unsigned int arcache;
+       unsigned int awcache;
+
        /* Rings for Tx/Rx on a DMA channel */
        struct xgbe_channel *channel;
        unsigned int channel_count;
@@ -589,6 +604,9 @@ struct xgbe_prv_data {
        struct napi_struct napi;
        struct xgbe_mmc_stats mmc_stats;
 
+       /* Filtering support */
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
        /* System clock value used for Rx watchdog */
        struct clk *sysclock;
 
index 53f85bf715268db94d864695321e3890454719ce..36cc9bd07c478e7e776f7be199af0787eeb04da5 100644 (file)
@@ -105,12 +105,10 @@ struct buffer_state {
 /**
  * struct arc_emac_priv - Storage of EMAC's private information.
  * @dev:       Pointer to the current device.
- * @ndev:      Pointer to the current network device.
  * @phy_dev:   Pointer to attached PHY device.
  * @bus:       Pointer to the current MII bus.
  * @regs:      Base address of EMAC memory-mapped control registers.
  * @napi:      Structure for NAPI.
- * @stats:     Network device statistics.
  * @rxbd:      Pointer to Rx BD ring.
  * @txbd:      Pointer to Tx BD ring.
  * @rxbd_dma:  DMA handle for Rx BD ring.
@@ -127,7 +125,6 @@ struct buffer_state {
 struct arc_emac_priv {
        /* Devices */
        struct device *dev;
-       struct net_device *ndev;
        struct phy_device *phy_dev;
        struct mii_bus *bus;
 
@@ -135,7 +132,6 @@ struct arc_emac_priv {
        struct clk *clk;
 
        struct napi_struct napi;
-       struct net_device_stats stats;
 
        struct arc_emac_bd *rxbd;
        struct arc_emac_bd *txbd;
index 18e2faccebb0dcb98bc19bdc333561776aec6b95..fe5cfeace6e3e1cd5bdcbce922b5b51e0f7cc326 100644 (file)
@@ -140,7 +140,7 @@ static const struct ethtool_ops arc_emac_ethtool_ops = {
 static void arc_emac_tx_clean(struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned int i;
 
        for (i = 0; i < TX_BD_NUM; i++) {
@@ -202,7 +202,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 
        for (work_done = 0; work_done < budget; work_done++) {
                unsigned int *last_rx_bd = &priv->last_rx_bd;
-               struct net_device_stats *stats = &priv->stats;
+               struct net_device_stats *stats = &ndev->stats;
                struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
                struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
                unsigned int pktlen, info = le32_to_cpu(rxbd->info);
@@ -318,7 +318,7 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 {
        struct net_device *ndev = dev_instance;
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned int status;
 
        status = arc_reg_get(priv, R_STATUS);
@@ -529,7 +529,7 @@ static int arc_emac_stop(struct net_device *ndev)
 static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned long miss, rxerr;
        u8 rxcrc, rxfram, rxoflow;
 
@@ -565,7 +565,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
        unsigned int len, *txbd_curr = &priv->txbd_curr;
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        __le32 *info = &priv->txbd[*txbd_curr].info;
        dma_addr_t addr;
 
@@ -720,7 +720,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 
        priv = netdev_priv(ndev);
        priv->dev = &pdev->dev;
-       priv->ndev = ndev;
 
        priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
        if (IS_ERR(priv->regs)) {
index 1cda49a28f7f0e9a672ae775a832eba31ad76fb3..52fdfe22597807dcde7034b8b9b5a2d84080ec42 100644 (file)
@@ -639,7 +639,6 @@ int atl1c_phy_init(struct atl1c_hw *hw)
                        dev_err(&pdev->dev, "Wrong Media type %d\n",
                                hw->media_type);
                return -1;
-               break;
        }
 
        ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
@@ -682,7 +681,6 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
                break;
        default:
                return -1;
-               break;
        }
 
        if (phy_data & GIGA_PSSR_DPLX)
index 923063d2e5bbcd90a04fffe0642e8ca5bab32da6..113565da155f27373955dd3cce73e96c181ffecc 100644 (file)
@@ -618,7 +618,6 @@ int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
                break;
        default:
                return AT_ERR_PHY_SPEED;
-               break;
        }
 
        if (phy_data & MII_AT001_PSSR_DPLX)
index b460db7919a28866c5f37db08c9a30b71a857840..1546d550ac97cfb9493a874ceffdb9940fd2cb89 100644 (file)
@@ -910,7 +910,6 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
                if (netif_msg_hw(adapter))
                        dev_dbg(&pdev->dev, "error getting speed\n");
                return ATLX_ERR_PHY_SPEED;
-               break;
        }
        if (phy_data & MII_ATLX_PSSR_DPLX)
                *duplex = FULL_DUPLEX;
index 6746bd7171460100a4d224e55bdcbe62a4efcecf..c194bc687c30e596df83f16ff09cf0e2b6682b5d 100644 (file)
@@ -2493,7 +2493,6 @@ static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
                break;
        default:
                return ATLX_ERR_PHY_SPEED;
-               break;
        }
 
        if (phy_data & MII_ATLX_PSSR_DPLX)
@@ -2933,11 +2932,9 @@ static int atl2_validate_option(int *value, struct atl2_option *opt)
                case OPTION_ENABLED:
                        printk(KERN_INFO "%s Enabled\n", opt->name);
                        return 0;
-                       break;
                case OPTION_DISABLED:
                        printk(KERN_INFO "%s Disabled\n", opt->name);
                        return 0;
-                       break;
                }
                break;
        case range_option:
index 3e488094b0731811459c66dcb0517d00cb7dfbbe..7dcfb19a31c888894121aa03c83bc8fe439bc4cf 100644 (file)
@@ -72,23 +72,23 @@ config BCMGENET
          Broadcom BCM7xxx Set Top Box family chipset.
 
 config BNX2
-       tristate "Broadcom NetXtremeII support"
+       tristate "QLogic NetXtremeII support"
        depends on PCI
        select CRC32
        select FW_LOADER
        ---help---
-         This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+         This driver supports QLogic NetXtremeII gigabit Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
          will be called bnx2.  This is recommended.
 
 config CNIC
-       tristate "Broadcom CNIC support"
+       tristate "QLogic CNIC support"
        depends on PCI
        select BNX2
        select UIO
        ---help---
-         This driver supports offload features of Broadcom NetXtremeII
+         This driver supports offload features of QLogic NetXtremeII
          gigabit Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
index 5776e503e4c57eb374e304fecc8e0fa44e2e5f85..6f4e18644bd4e5089c7485acd7417a0adff76bff 100644 (file)
@@ -81,14 +81,14 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
-                       d + DESC_ADDR_HI_STATUS_LEN);
+                    d + DESC_ADDR_HI_STATUS_LEN);
 #endif
        __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
 }
 
 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
-                                               struct dma_desc *desc,
-                                               unsigned int port)
+                                            struct dma_desc *desc,
+                                            unsigned int port)
 {
        /* Ports are latched, so write upper address first */
        tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
@@ -108,7 +108,7 @@ static int bcm_sysport_set_settings(struct net_device *dev,
 }
 
 static int bcm_sysport_get_settings(struct net_device *dev,
-                                       struct ethtool_cmd *cmd)
+                                   struct ethtool_cmd *cmd)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
 
@@ -119,14 +119,14 @@ static int bcm_sysport_get_settings(struct net_device *dev,
 }
 
 static int bcm_sysport_set_rx_csum(struct net_device *dev,
-                                       netdev_features_t wanted)
+                                  netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
 
-       priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+       priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
-       if (priv->rx_csum_en)
+       if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
                reg &= ~RXCHK_EN;
@@ -134,7 +134,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
        /* If UniMAC forwards CRC, we need to skip over it to get
         * a valid CHK bit to be set in the per-packet status word
         */
-       if (priv->rx_csum_en && priv->crc_fwd)
+       if (priv->rx_chk_en && priv->crc_fwd)
                reg |= RXCHK_SKIP_FCS;
        else
                reg &= ~RXCHK_SKIP_FCS;
@@ -145,7 +145,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
 }
 
 static int bcm_sysport_set_tx_csum(struct net_device *dev,
-                                       netdev_features_t wanted)
+                                  netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
@@ -165,7 +165,7 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
 }
 
 static int bcm_sysport_set_features(struct net_device *dev,
-                                       netdev_features_t features)
+                                   netdev_features_t features)
 {
        netdev_features_t changed = features ^ dev->features;
        netdev_features_t wanted = dev->wanted_features;
@@ -261,7 +261,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* RXCHK misc statistics */
        STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
        STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
-                       RXCHK_OTHER_DISC_CNTR),
+                  RXCHK_OTHER_DISC_CNTR),
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
@@ -270,7 +270,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
 
 static void bcm_sysport_get_drvinfo(struct net_device *dev,
-                                       struct ethtool_drvinfo *info)
+                                   struct ethtool_drvinfo *info)
 {
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
@@ -303,7 +303,7 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
 }
 
 static void bcm_sysport_get_strings(struct net_device *dev,
-                                       u32 stringset, u8 *data)
+                                   u32 stringset, u8 *data)
 {
        int i;
 
@@ -311,8 +311,8 @@ static void bcm_sysport_get_strings(struct net_device *dev,
        case ETH_SS_STATS:
                for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
-                               bcm_sysport_gstrings_stats[i].stat_string,
-                               ETH_GSTRING_LEN);
+                              bcm_sysport_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
                }
                break;
        default:
@@ -362,7 +362,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 }
 
 static void bcm_sysport_get_stats(struct net_device *dev,
-                                       struct ethtool_stats *stats, u64 *data)
+                                 struct ethtool_stats *stats, u64 *data)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        int i;
@@ -384,6 +384,64 @@ static void bcm_sysport_get_stats(struct net_device *dev,
        }
 }
 
+static void bcm_sysport_get_wol(struct net_device *dev,
+                               struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+       wol->wolopts = priv->wolopts;
+
+       if (!(priv->wolopts & WAKE_MAGICSECURE))
+               return;
+
+       /* Return the programmed SecureOn password */
+       reg = umac_readl(priv, UMAC_PSW_MS);
+       put_unaligned_be16(reg, &wol->sopass[0]);
+       reg = umac_readl(priv, UMAC_PSW_LS);
+       put_unaligned_be32(reg, &wol->sopass[2]);
+}
+
+static int bcm_sysport_set_wol(struct net_device *dev,
+                              struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+
+       if (!device_can_wakeup(kdev))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~supported)
+               return -EINVAL;
+
+       /* Program the SecureOn password */
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+                           UMAC_PSW_MS);
+               umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+                           UMAC_PSW_LS);
+       }
+
+       /* Flag the device and relevant IRQ as wakeup capable */
+       if (wol->wolopts) {
+               device_set_wakeup_enable(kdev, 1);
+               enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 0;
+       } else {
+               device_set_wakeup_enable(kdev, 0);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 1;
+       }
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
        dev_kfree_skb_any(cb->skb);
@@ -406,7 +464,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
        }
 
        mapping = dma_map_single(kdev, cb->skb->data,
-                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                bcm_sysport_free_cb(cb);
@@ -470,22 +528,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                to_process = p_index - priv->rx_c_index;
 
        netif_dbg(priv, rx_status, ndev,
-                       "p_index=%d rx_c_index=%d to_process=%d\n",
-                       p_index, priv->rx_c_index, to_process);
-
-       while ((processed < to_process) &&
-               (processed < budget)) {
+                 "p_index=%d rx_c_index=%d to_process=%d\n",
+                 p_index, priv->rx_c_index, to_process);
 
+       while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
-                       DESC_STATUS_MASK;
+                         DESC_STATUS_MASK;
 
                processed++;
                priv->rx_read_ptr++;
@@ -493,9 +549,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                        priv->rx_read_ptr = 0;
 
                netif_dbg(priv, rx_status, ndev,
-                               "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
-                               p_index, priv->rx_c_index, priv->rx_read_ptr,
-                               len, status);
+                         "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+                         p_index, priv->rx_c_index, priv->rx_read_ptr,
+                         len, status);
 
                if (unlikely(!skb)) {
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
@@ -554,9 +610,9 @@ refill:
 }
 
 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
-                                       struct bcm_sysport_cb *cb,
-                                       unsigned int *bytes_compl,
-                                       unsigned int *pkts_compl)
+                                      struct bcm_sysport_cb *cb,
+                                      unsigned int *bytes_compl,
+                                      unsigned int *pkts_compl)
 {
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
@@ -565,8 +621,8 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
                ndev->stats.tx_bytes += cb->skb->len;
                *bytes_compl += cb->skb->len;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                               dma_unmap_len(cb, dma_len),
-                               DMA_TO_DEVICE);
+                                dma_unmap_len(cb, dma_len),
+                                DMA_TO_DEVICE);
                ndev->stats.tx_packets++;
                (*pkts_compl)++;
                bcm_sysport_free_cb(cb);
@@ -574,7 +630,7 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
        } else if (dma_unmap_addr(cb, dma_addr)) {
                ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
                dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
-                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+                              dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
                dma_unmap_addr_set(cb, dma_addr, 0);
        }
 }
@@ -608,8 +664,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                last_tx_cn = num_tx_cbs - last_c_index + c_index;
 
        netif_dbg(priv, tx_done, ndev,
-                       "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
-                       ring->index, c_index, last_tx_cn, last_c_index);
+                 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+                 ring->index, c_index, last_tx_cn, last_c_index);
 
        while (last_tx_cn-- > 0) {
                cb = ring->cbs + last_c_index;
@@ -626,8 +682,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                netif_tx_wake_queue(txq);
 
        netif_dbg(priv, tx_done, ndev,
-                       "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
-                       ring->index, ring->c_index, pkts_compl, bytes_compl);
+                 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+                 ring->index, ring->c_index, pkts_compl, bytes_compl);
 
        return pkts_compl;
 }
@@ -692,6 +748,20 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       /* Stop monitoring MPD interrupt */
+       intrl2_0_mask_set(priv, INTRL2_0_MPD);
+
+       /* Clear the MagicPacket detection logic */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+}
 
 /* RX and misc interrupt routine */
 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
@@ -722,6 +792,11 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
+       if (priv->irq0_stat & INTRL2_0_MPD) {
+               netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+               bcm_sysport_resume_from_wol(priv);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -757,6 +832,15 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
+{
+       struct bcm_sysport_priv *priv = dev_id;
+
+       pm_wakeup_event(&priv->pdev->dev, 0);
+
+       return IRQ_HANDLED;
+}
+
 static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 {
        struct sk_buff *nskb;
@@ -804,8 +888,9 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
                        csum_info |= L4_LENGTH_VALID;
                        if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
                                csum_info |= L4_UDP;
-               } else
+               } else {
                        csum_info = 0;
+               }
 
                tsb->l4_ptr_dest_map = csum_info;
        }
@@ -869,7 +954,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
-                               skb->data, skb_len);
+                         skb->data, skb_len);
                ret = NETDEV_TX_OK;
                goto out;
        }
@@ -887,7 +972,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
        len_status |= (skb_len << DESC_LEN_SHIFT);
        len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
-                       DESC_STATUS_SHIFT;
+                      DESC_STATUS_SHIFT;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
 
@@ -912,7 +997,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                netif_tx_stop_queue(txq);
 
        netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
-                       ring->index, ring->desc_count, ring->curr_desc);
+                 ring->index, ring->desc_count, ring->curr_desc);
 
        ret = NETDEV_TX_OK;
 out:
@@ -1010,7 +1095,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
                return -ENOMEM;
        }
 
-       ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+       ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
        if (!ring->cbs) {
                netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
                return -ENOMEM;
@@ -1050,14 +1135,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        napi_enable(&ring->napi);
 
        netif_dbg(priv, hw, priv->netdev,
-                       "TDMA cfg, size=%d, desc_cpu=%p\n",
-                       ring->size, ring->desc_cpu);
+                 "TDMA cfg, size=%d, desc_cpu=%p\n",
+                 ring->size, ring->desc_cpu);
 
        return 0;
 }
 
 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
-                                       unsigned int index)
+                                    unsigned int index)
 {
        struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
        struct device *kdev = &priv->pdev->dev;
@@ -1088,7 +1173,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 
 /* RDMA helper */
 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                 unsigned int enable)
 {
        unsigned int timeout = 1000;
        u32 reg;
@@ -1115,7 +1200,7 @@ static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
 
 /* TDMA helper */
 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                 unsigned int enable)
 {
        unsigned int timeout = 1000;
        u32 reg;
@@ -1153,8 +1238,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
-       priv->rx_cbs = kzalloc(priv->num_rx_bds *
-                               sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
+                               GFP_KERNEL);
        if (!priv->rx_cbs) {
                netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
                return -ENOMEM;
@@ -1186,8 +1271,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        rdma_writel(priv, 1, RDMA_MBDONE_INTR);
 
        netif_dbg(priv, hw, priv->netdev,
-                       "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
-                       priv->num_rx_bds, priv->rx_bds);
+                 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+                 priv->num_rx_bds, priv->rx_bds);
 
        return 0;
 }
@@ -1207,8 +1292,8 @@ static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
                cb = &priv->rx_cbs[i];
                if (dma_unmap_addr(cb, dma_addr))
                        dma_unmap_single(&priv->pdev->dev,
-                                       dma_unmap_addr(cb, dma_addr),
-                                       RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                        dma_unmap_addr(cb, dma_addr),
+                                        RX_BUF_LENGTH, DMA_FROM_DEVICE);
                bcm_sysport_free_cb(cb);
        }
 
@@ -1236,15 +1321,15 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
 }
 
 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                  u32 mask, unsigned int enable)
 {
        u32 reg;
 
        reg = umac_readl(priv, UMAC_CMD);
        if (enable)
-               reg |= CMD_RX_EN | CMD_TX_EN;
+               reg |= mask;
        else
-               reg &= ~(CMD_RX_EN | CMD_TX_EN);
+               reg &= ~mask;
        umac_writel(priv, reg, UMAC_CMD);
 
        /* UniMAC stops on a packet boundary, wait for a full-sized packet
@@ -1268,7 +1353,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
 }
 
 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
-                               unsigned char *addr)
+                            unsigned char *addr)
 {
        umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | addr[3], UMAC_MAC0);
@@ -1284,11 +1369,35 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)
        topctrl_writel(priv, 0, TX_FLUSH_CNTL);
 }
 
+static void bcm_sysport_netif_start(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       /* Enable NAPI */
+       napi_enable(&priv->napi);
+
+       phy_start(priv->phydev);
+
+       /* Enable TX interrupts for the 32 TXQs */
+       intrl2_1_mask_clear(priv, 0xffffffff);
+
+       /* Last call before we start the real business */
+       netif_tx_start_all_queues(dev);
+}
+
+static void rbuf_init(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       reg = rbuf_readl(priv, RBUF_CONTROL);
+       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
 static int bcm_sysport_open(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
-       u32 reg;
        int ret;
 
        /* Reset UniMAC */
@@ -1298,12 +1407,10 @@ static int bcm_sysport_open(struct net_device *dev)
        topctrl_flush(priv);
 
        /* Disable the UniMAC RX/TX */
-       umac_enable_set(priv, 0);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
 
        /* Enable RBUF 2bytes alignment and Receive Status Block */
-       reg = rbuf_readl(priv, RBUF_CONTROL);
-       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
-       rbuf_writel(priv, reg, RBUF_CONTROL);
+       rbuf_init(priv);
 
        /* Set maximum frame length */
        umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1351,7 +1458,7 @@ static int bcm_sysport_open(struct net_device *dev)
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
                        netdev_err(dev, "failed to initialize TX ring %d\n",
-                                       i);
+                                  i);
                        goto out_free_tx_ring;
                }
        }
@@ -1379,19 +1486,10 @@ static int bcm_sysport_open(struct net_device *dev)
        if (ret)
                goto out_clear_rx_int;
 
-       /* Enable NAPI */
-       napi_enable(&priv->napi);
-
        /* Turn on UniMAC TX/RX */
-       umac_enable_set(priv, 1);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
 
-       phy_start(priv->phydev);
-
-       /* Enable TX interrupts for the 32 TXQs */
-       intrl2_1_mask_clear(priv, 0xffffffff);
-
-       /* Last call before we start the real business */
-       netif_tx_start_all_queues(dev);
+       bcm_sysport_netif_start(dev);
 
        return 0;
 
@@ -1410,12 +1508,9 @@ out_phy_disconnect:
        return ret;
 }
 
-static int bcm_sysport_stop(struct net_device *dev)
+static void bcm_sysport_netif_stop(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
-       unsigned int i;
-       u32 reg;
-       int ret;
 
        /* stop all software from updating hardware */
        netif_tx_stop_all_queues(dev);
@@ -1427,11 +1522,18 @@ static int bcm_sysport_stop(struct net_device *dev)
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        intrl2_1_mask_set(priv, 0xffffffff);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret;
+
+       bcm_sysport_netif_stop(dev);
 
        /* Disable UniMAC RX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_RX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_RX_EN, 0);
 
        ret = tdma_enable_set(priv, 0);
        if (ret) {
@@ -1449,9 +1551,7 @@ static int bcm_sysport_stop(struct net_device *dev)
        }
 
        /* Disable UniMAC TX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_TX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_TX_EN, 0);
 
        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
@@ -1477,6 +1577,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
        .get_strings            = bcm_sysport_get_strings,
        .get_ethtool_stats      = bcm_sysport_get_stats,
        .get_sset_count         = bcm_sysport_get_sset_count,
+       .get_wol                = bcm_sysport_get_wol,
+       .set_wol                = bcm_sysport_set_wol,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1518,6 +1620,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
+       priv->wol_irq = platform_get_irq(pdev, 2);
        if (priv->irq0 <= 0 || priv->irq1 <= 0) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
@@ -1570,6 +1673,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
+       /* Request the WOL interrupt and advertise suspend if available */
+       priv->wol_irq_disabled = 1;
+       ret = devm_request_irq(&pdev->dev, priv->wol_irq,
+                              bcm_sysport_wol_isr, 0, dev->name, priv);
+       if (!ret)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* Set the needed headroom once and for all */
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);
@@ -1585,10 +1695,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
        dev_info(&pdev->dev,
-               "Broadcom SYSTEMPORT" REV_FMT
-               " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
-               (priv->rev >> 8) & 0xff, priv->rev & 0xff,
-               priv->base, priv->irq0, priv->irq1, txq, rxq);
+                "Broadcom SYSTEMPORT" REV_FMT
+                " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+                (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+                priv->base, priv->irq0, priv->irq1, txq, rxq);
 
        return 0;
 err:
@@ -1610,6 +1720,208 @@ static int bcm_sysport_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       /* Password has already been programmed */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg |= MPD_EN;
+       reg &= ~PSW_EN;
+       if (priv->wolopts & WAKE_MAGICSECURE)
+               reg |= PSW_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Make sure RBUF entered WoL mode as result */
+       do {
+               reg = rbuf_readl(priv, RBUF_STATUS);
+               if (reg & RBUF_WOL_MODE)
+                       break;
+
+               udelay(10);
+       } while (timeout-- > 0);
+
+       /* Do not leave the UniMAC RBUF matching only MPD packets */
+       if (!timeout) {
+               reg = umac_readl(priv, UMAC_MPD_CTRL);
+               reg &= ~MPD_EN;
+               umac_writel(priv, reg, UMAC_MPD_CTRL);
+               netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
+               return -ETIMEDOUT;
+       }
+
+       /* UniMAC receive needs to be turned on */
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* Enable the interrupt wake-up source */
+       intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+
+       netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+
+       return 0;
+}
+
+static int bcm_sysport_suspend(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret = 0;
+       u32 reg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       bcm_sysport_netif_stop(dev);
+
+       phy_suspend(priv->phydev);
+
+       netif_device_detach(dev);
+
+       /* Disable UniMAC RX */
+       umac_enable_set(priv, CMD_RX_EN, 0);
+
+       ret = rdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "RDMA timeout!\n");
+               return ret;
+       }
+
+       /* Disable RXCHK if enabled */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg &= ~RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       /* Flush RX pipe */
+       if (!priv->wolopts)
+               topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+
+       ret = tdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               return ret;
+       }
+
+       /* Wait for a packet boundary */
+       usleep_range(2000, 3000);
+
+       umac_enable_set(priv, CMD_TX_EN, 0);
+
+       topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+
+       /* Free RX/TX rings SW structures */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       bcm_sysport_fini_rx_ring(priv);
+
+       /* Get prepared for Wake-on-LAN */
+       if (device_may_wakeup(d) && priv->wolopts)
+               ret = bcm_sysport_suspend_to_wol(priv);
+
+       return ret;
+}
+
+static int bcm_sysport_resume(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* We may have been suspended and never received a WOL event that
+        * would turn off MPD detection, take care of that now
+        */
+       bcm_sysport_resume_from_wol(priv);
+
+       /* Initialize both hardware and software ring */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ret = bcm_sysport_init_tx_ring(priv, i);
+               if (ret) {
+                       netdev_err(dev, "failed to initialize TX ring %d\n",
+                                  i);
+                       goto out_free_tx_rings;
+               }
+       }
+
+       /* Initialize linked-list */
+       tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+       /* Initialize RX ring */
+       ret = bcm_sysport_init_rx_ring(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize RX ring\n");
+               goto out_free_rx_ring;
+       }
+
+       netif_device_attach(dev);
+
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+       /* RX pipe enable */
+       topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+
+       ret = rdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "failed to enable RDMA\n");
+               goto out_free_rx_ring;
+       }
+
+       /* Re-enable RXCHK (RX checksum offload block) if it was on */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg |= RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       rbuf_init(priv);
+
+       /* Set maximum frame length */
+       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* Set MAC address */
+       umac_set_hw_addr(priv, dev->dev_addr);
+
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* TX pipe enable */
+       topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+
+       umac_enable_set(priv, CMD_TX_EN, 1);
+
+       ret = tdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               goto out_free_rx_ring;
+       }
+
+       phy_resume(priv->phydev);
+
+       bcm_sysport_netif_start(dev);
+
+       return 0;
+
+out_free_rx_ring:
+       bcm_sysport_fini_rx_ring(priv);
+out_free_tx_rings:
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
+               bcm_sysport_suspend, bcm_sysport_resume);
+
 static const struct of_device_id bcm_sysport_of_match[] = {
        { .compatible = "brcm,systemport-v1.00" },
        { .compatible = "brcm,systemport" },
@@ -1623,6 +1935,7 @@ static struct platform_driver bcm_sysport_driver = {
                .name = "brcm-systemport",
                .owner = THIS_MODULE,
                .of_match_table = bcm_sysport_of_match,
+               .pm = &bcm_sysport_pm_ops,
        },
 };
 module_platform_driver(bcm_sysport_driver);
index 281c082460375611bc61c02baa476a9c0d03b33a..b08dab828101e7800d125d45f6e370169231f8f3 100644 (file)
@@ -246,6 +246,15 @@ struct bcm_rsb {
 #define  MIB_RX_CNT_RST                        (1 << 0)
 #define  MIB_RUNT_CNT_RST              (1 << 1)
 #define  MIB_TX_CNT_RST                        (1 << 2)
+
+#define UMAC_MPD_CTRL                  0x620
+#define  MPD_EN                                (1 << 0)
+#define  MSEQ_LEN_SHIFT                        16
+#define  MSEQ_LEN_MASK                 0xff
+#define  PSW_EN                                (1 << 27)
+
+#define UMAC_PSW_MS                    0x624
+#define UMAC_PSW_LS                    0x628
 #define UMAC_MDF_CTRL                  0x650
 #define UMAC_MDF_ADDR                  0x654
 
@@ -642,6 +651,7 @@ struct bcm_sysport_priv {
        struct platform_device  *pdev;
        int                     irq0;
        int                     irq1;
+       int                     wol_irq;
 
        /* Transmit rings */
        struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
@@ -664,10 +674,12 @@ struct bcm_sysport_priv {
        int                     old_duplex;
 
        /* Misc fields */
-       unsigned int            rx_csum_en:1;
+       unsigned int            rx_chk_en:1;
        unsigned int            tsb_en:1;
        unsigned int            crc_fwd:1;
        u16                     rev;
+       u32                     wolopts;
+       unsigned int            wol_irq_disabled:1;
 
        /* MIB related fields */
        struct bcm_sysport_mib  mib;
index 67d2b00473718eec10b9f1fbdbf4f6a981aa02d0..e64c963fe77540c7fd5b77c79b9f8817f3643bf5 100644 (file)
@@ -1,6 +1,7 @@
-/* bnx2.c: Broadcom NX2 network driver.
+/* bnx2.c: QLogic NX2 network driver.
  *
- * Copyright (c) 2004-2013 Broadcom Corporation
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define TX_TIMEOUT  (5*HZ)
 
 static char version[] =
-       "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+       "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 MODULE_FIRMWARE(FW_MIPS_FILE_06);
index e341bc366fa5f1d003a9355516a8ebdd81811ac8..28df35d35893360af2593de3a57b3fe13b7a9f6d 100644 (file)
@@ -1,6 +1,7 @@
-/* bnx2.h: Broadcom NX2 network driver.
+/* bnx2.h: QLogic NX2 network driver.
  *
- * Copyright (c) 2004-2013 Broadcom Corporation
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 940eb91f209d29365b4a9c0b090240f82e46e2a1..7db79c28b5ff53a663f519aed22807de81532794 100644 (file)
@@ -1,6 +1,7 @@
-/* bnx2_fw.h: Broadcom NX2 network driver.
+/* bnx2_fw.h: QLogic NX2 network driver.
  *
  * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 4cab09d3f80729a2bd843cf6b0b6490f0a8e95ad..ce8f86966c11eaecc80bca97ecb0eb6a664cd73c 100644 (file)
@@ -1482,6 +1482,7 @@ struct bnx2x {
        union pf_vf_bulletin   *pf2vf_bulletin;
        dma_addr_t              pf2vf_bulletin_mapping;
 
+       union pf_vf_bulletin            shadow_bulletin;
        struct pf_vf_bulletin_content   old_bulletin;
 
        u16 requested_nr_virtfn;
@@ -1507,8 +1508,10 @@ struct bnx2x {
 /* TCP with Timestamp Option (32) + IPv6 (40) */
 #define ETH_MAX_TPA_HEADER_SIZE                72
 
-       /* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT           min(8, L1_CACHE_SHIFT)
+       /* Max supported alignment is 256 (8 shift)
+        * minimal alignment shift 6 is optimal for 57xxx HW performance
+        */
+#define BNX2X_RX_ALIGN_SHIFT           max(6, min(8, L1_CACHE_SHIFT))
 
        /* FW uses 2 Cache lines Alignment for start packet and size
         *
@@ -1928,6 +1931,8 @@ struct bnx2x {
        struct semaphore                        stats_sema;
 
        u8                                      phys_port_id[ETH_ALEN];
+
+       struct bnx2x_link_report_data           vf_link_vars;
 };
 
 /* Tx queues may be less or equal to Rx queues */
index 4b875da1c7ed2afc0eec3854217b4919797f3e20..dca1236dd1cd15afeb97474952c3848c74fe2084 100644 (file)
@@ -1186,29 +1186,38 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 static void bnx2x_fill_report_data(struct bnx2x *bp,
                                   struct bnx2x_link_report_data *data)
 {
-       u16 line_speed = bnx2x_get_mf_speed(bp);
-
        memset(data, 0, sizeof(*data));
 
-       /* Fill the report data: effective line speed */
-       data->line_speed = line_speed;
-
-       /* Link is down */
-       if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
-               __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
-                         &data->link_report_flags);
-
-       /* Full DUPLEX */
-       if (bp->link_vars.duplex == DUPLEX_FULL)
-               __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
-
-       /* Rx Flow Control is ON */
-       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
-               __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
-
-       /* Tx Flow Control is ON */
-       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-               __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+       if (IS_PF(bp)) {
+               /* Fill the report data: effective line speed */
+               data->line_speed = bnx2x_get_mf_speed(bp);
+
+               /* Link is down */
+               if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &data->link_report_flags);
+
+               if (!BNX2X_NUM_ETH_QUEUES(bp))
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &data->link_report_flags);
+
+               /* Full DUPLEX */
+               if (bp->link_vars.duplex == DUPLEX_FULL)
+                       __set_bit(BNX2X_LINK_REPORT_FD,
+                                 &data->link_report_flags);
+
+               /* Rx Flow Control is ON */
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+                       __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                                 &data->link_report_flags);
+
+               /* Tx Flow Control is ON */
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+                       __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                                 &data->link_report_flags);
+       } else { /* VF */
+               *data = bp->vf_link_vars;
+       }
 }
 
 /**
@@ -1262,6 +1271,10 @@ void __bnx2x_link_report(struct bnx2x *bp)
         */
        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
 
+       /* propagate status to VFs */
+       if (IS_PF(bp))
+               bnx2x_iov_link_update(bp);
+
        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
                netif_carrier_off(bp->dev);
index 51a952c51cb1a5fda4fbb3807fc9d73da8710087..fb26bc4c42a1fd03d42ef885dc562bcf4d460ade 100644 (file)
@@ -2303,8 +2303,8 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
        return 0;
 }
 
-static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
-                                u16 idval, u8 up)
+static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+                                 u16 idval, u8 up)
 {
        struct bnx2x *bp = netdev_priv(netdev);
 
index bd0600cf72660f3fbc5ac9b2ad37b481fa0969a0..08ea91cab738a02c2c16d87e1eb1d37a054b881c 100644 (file)
@@ -216,6 +216,43 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
        return port_type;
 }
 
+static int bnx2x_get_vf_settings(struct net_device *dev,
+                                struct ethtool_cmd *cmd)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (bp->state == BNX2X_STATE_OPEN) {
+               if (test_bit(BNX2X_LINK_REPORT_FD,
+                            &bp->vf_link_vars.link_report_flags))
+                       cmd->duplex = DUPLEX_FULL;
+               else
+                       cmd->duplex = DUPLEX_HALF;
+
+               ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+       } else {
+               cmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+       }
+
+       cmd->port               = PORT_OTHER;
+       cmd->phy_address        = 0;
+       cmd->transceiver        = XCVR_INTERNAL;
+       cmd->autoneg            = AUTONEG_DISABLE;
+       cmd->maxtxpkt           = 0;
+       cmd->maxrxpkt           = 0;
+
+       DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+          "  supported 0x%x  advertising 0x%x  speed %u\n"
+          "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+          "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+          cmd->cmd, cmd->supported, cmd->advertising,
+          ethtool_cmd_speed(cmd),
+          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+       return 0;
+}
+
 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -1110,6 +1147,10 @@ static u32 bnx2x_get_link(struct net_device *dev)
        if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
                return 0;
 
+       if (IS_VF(bp))
+               return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                &bp->vf_link_vars.link_report_flags);
+
        return bp->link_vars.link_up;
 }
 
@@ -3484,8 +3525,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 };
 
 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
-       .get_settings           = bnx2x_get_settings,
-       .set_settings           = bnx2x_set_settings,
+       .get_settings           = bnx2x_get_vf_settings,
        .get_drvinfo            = bnx2x_get_drvinfo,
        .get_msglevel           = bnx2x_get_msglevel,
        .set_msglevel           = bnx2x_set_msglevel,
index 6a8b1453a1b96e80bc9e58eef13787fdaa6afe5e..3871ec49cc4d6200ed27db793c595847fd5d29f8 100644 (file)
@@ -2698,6 +2698,14 @@ void bnx2x__link_status_update(struct bnx2x *bp)
                bp->link_vars.duplex = DUPLEX_FULL;
                bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
                __bnx2x_link_report(bp);
+
+               bnx2x_sample_bulletin(bp);
+
+               /* if bulletin board did not have an update for link status
+                * __bnx2x_link_report will report current status
+                * but it will NOT duplicate report in case of already reported
+                * during sampling bulletin board.
+                */
                bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 }
@@ -12424,6 +12432,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_busy_poll          = bnx2x_low_latency_recv,
 #endif
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
+       .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
index eda8583f6fc0506c2c4d64fa1f7737645b36e277..662310c5f4e98c4f7d7cdda3d0062969f13ddd5b 100644 (file)
 #include <linux/crc32.h>
 #include <linux/if_vlan.h>
 
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+                           struct bnx2x_virtf **vf,
+                           struct pf_vf_bulletin_content **bulletin,
+                           bool test_queue);
+
 /* General service functions */
 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
@@ -597,8 +602,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
        rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
        if (rc) {
                BNX2X_ERR("Failed to remove multicasts\n");
-               if (mc)
-                       kfree(mc);
+               kfree(mc);
                return rc;
        }
 
@@ -1328,6 +1332,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
        /* Prepare the VFs event synchronization mechanism */
        mutex_init(&bp->vfdb->event_mutex);
 
+       mutex_init(&bp->vfdb->bulletin_mutex);
+
        return 0;
 failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1473,6 +1479,107 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
           vf->abs_vfid, q->sp_obj.func_id, q->cid);
 }
 
+static int bnx2x_max_speed_cap(struct bnx2x *bp)
+{
+       u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
+
+       if (supported &
+           (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
+               return 20000;
+
+       return 10000; /* assume lowest supported speed is 10G */
+}
+
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
+{
+       struct bnx2x_link_report_data *state = &bp->last_reported_link;
+       struct pf_vf_bulletin_content *bulletin;
+       struct bnx2x_virtf *vf;
+       bool update = true;
+       int rc = 0;
+
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
+       if (rc)
+               return rc;
+
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
+       if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+
+               bulletin->link_speed = state->line_speed;
+               bulletin->link_flags = 0;
+               if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+               if (test_bit(BNX2X_LINK_REPORT_FD,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
+               if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
+               if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
+       } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
+                  !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+               bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+       } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
+                  (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+               bulletin->link_speed = bnx2x_max_speed_cap(bp);
+               bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
+       } else {
+               update = false;
+       }
+
+       if (update) {
+               DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
+                  "vf %d mode %u speed %d flags %x\n", idx,
+                  vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
+
+               /* Post update on VF's bulletin board */
+               rc = bnx2x_post_vf_bulletin(bp, idx);
+               if (rc) {
+                       BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+       return rc;
+}
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_virtf *vf = BP_VF(bp, idx);
+
+       if (!vf)
+               return -EINVAL;
+
+       if (vf->link_cfg == link_state)
+               return 0; /* nothing todo */
+
+       vf->link_cfg = link_state;
+
+       return bnx2x_iov_link_update_vf(bp, idx);
+}
+
+void bnx2x_iov_link_update(struct bnx2x *bp)
+{
+       int vfid;
+
+       if (!IS_SRIOV(bp))
+               return;
+
+       for_each_vf(bp, vfid)
+               bnx2x_iov_link_update_vf(bp, vfid);
+}
+
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
@@ -2510,22 +2617,23 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
-                            struct bnx2x_virtf **vf,
-                            struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+                           struct bnx2x_virtf **vf,
+                           struct pf_vf_bulletin_content **bulletin,
+                           bool test_queue)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
-               BNX2X_ERR("vf ndo called though PF is down\n");
+               BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
                return -EINVAL;
        }
 
        if (!IS_SRIOV(bp)) {
-               BNX2X_ERR("vf ndo called though sriov is disabled\n");
+               BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n");
                return -EINVAL;
        }
 
        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
-               BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+               BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
                          vfidx, BNX2X_NR_VIRTFN(bp));
                return -EINVAL;
        }
@@ -2535,19 +2643,18 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
        *bulletin = BP_VF_BULLETIN(bp, vfidx);
 
        if (!*vf) {
-               BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
-                         vfidx);
+               BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
                return -EINVAL;
        }
 
-       if (!(*vf)->vfqs) {
-               BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+       if (test_queue && !(*vf)->vfqs) {
+               BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
                          vfidx);
                return -EINVAL;
        }
 
        if (!*bulletin) {
-               BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+               BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
                          vfidx);
                return -EINVAL;
        }
@@ -2566,9 +2673,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
        int rc;
 
        /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
        if (rc)
                return rc;
+
        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
        if (!mac_obj || !vlan_obj) {
@@ -2591,6 +2699,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                                                 VLAN_HLEN);
                }
        } else {
+               mutex_lock(&bp->vfdb->bulletin_mutex);
                /* mac */
                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
                        /* mac configured by ndo so its in bulletin board */
@@ -2606,6 +2715,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                else
                        /* function has not been loaded yet. Show vlans as 0s */
                        memset(&ivi->vlan, 0, VLAN_HLEN);
+
+               mutex_unlock(&bp->vfdb->bulletin_mutex);
        }
 
        return 0;
@@ -2635,15 +2746,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
        struct bnx2x_virtf *vf = NULL;
        struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
-       if (rc)
-               return rc;
        if (!is_valid_ether_addr(mac)) {
                BNX2X_ERR("mac address invalid\n");
                return -EINVAL;
        }
 
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
        /* update PF's copy of the VF's bulletin. Will no longer accept mac
         * configuration requests from vf unless match this mac
         */
@@ -2652,6 +2766,10 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 
        /* Post update on VF's bulletin board */
        rc = bnx2x_post_vf_bulletin(bp, vfidx);
+
+       /* release lock before checking return code */
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+
        if (rc) {
                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
                return rc;
@@ -2716,11 +2834,6 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        unsigned long accept_flags;
        int rc;
 
-       /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
-       if (rc)
-               return rc;
-
        if (vlan > 4095) {
                BNX2X_ERR("illegal vlan value %d\n", vlan);
                return -EINVAL;
@@ -2729,18 +2842,27 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
           vfidx, vlan, 0);
 
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+       if (rc)
+               return rc;
+
        /* update PF's copy of the VF's bulletin. No point in posting the vlan
         * to the VF since it doesn't have anything to do with it. But it useful
         * to store it here in case the VF is not up yet and we can only
         * configure the vlan later when it does. Treat vlan id 0 as remove the
         * Host tag.
         */
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
        if (vlan > 0)
                bulletin->valid_bitmap |= 1 << VLAN_VALID;
        else
                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
        bulletin->vlan = vlan;
 
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+
        /* is vf initialized and queue set up? */
        if (vf->state != VF_ENABLED ||
            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
@@ -2850,10 +2972,9 @@ out:
  * entire bulletin board excluding the crc field itself. Use the length field
  * as the Bulletin Board was posted by a PF with possibly a different version
  * from the vf which will sample it. Therefore, the length is computed by the
- * PF and the used blindly by the VF.
+ * PF and then used blindly by the VF.
  */
-u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
-                         struct pf_vf_bulletin_content *bulletin)
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
 {
        return crc32(BULLETIN_CRC_SEED,
                 ((u8 *)bulletin) + sizeof(bulletin->crc),
@@ -2863,47 +2984,74 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
 /* Check for new posts on the bulletin board */
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
 {
-       struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+       struct pf_vf_bulletin_content *bulletin;
        int attempts;
 
-       /* bulletin board hasn't changed since last sample */
-       if (bp->old_bulletin.version == bulletin.version)
-               return PFVF_BULLETIN_UNCHANGED;
+       /* sampling structure in mid post may result with corrupted data
+        * validate crc to ensure coherency.
+        */
+       for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+               u32 crc;
 
-       /* validate crc of new bulletin board */
-       if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
-               /* sampling structure in mid post may result with corrupted data
-                * validate crc to ensure coherency.
-                */
-               for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
-                       bulletin = bp->pf2vf_bulletin->content;
-                       if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
-                                                                 &bulletin))
-                               break;
-                       BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
-                                 bulletin.crc,
-                                 bnx2x_crc_vf_bulletin(bp, &bulletin));
-               }
-               if (attempts >= BULLETIN_ATTEMPTS) {
-                       BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
-                                 attempts);
-                       return PFVF_BULLETIN_CRC_ERR;
-               }
+               /* sample the bulletin board */
+               memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
+                      sizeof(union pf_vf_bulletin));
+
+               crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
+
+               if (bp->shadow_bulletin.content.crc == crc)
+                       break;
+
+               BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+                         bp->shadow_bulletin.content.crc, crc);
+       }
+
+       if (attempts >= BULLETIN_ATTEMPTS) {
+               BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+                         attempts);
+               return PFVF_BULLETIN_CRC_ERR;
        }
+       bulletin = &bp->shadow_bulletin.content;
+
+       /* bulletin board hasn't changed since last sample */
+       if (bp->old_bulletin.version == bulletin->version)
+               return PFVF_BULLETIN_UNCHANGED;
 
        /* the mac address in bulletin board is valid and is new */
-       if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
-           !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
+       if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
+           !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
                /* update new mac to net device */
-               memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+               memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+       }
+
+       if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
+               DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
+                  bulletin->link_speed, bulletin->link_flags);
+
+               bp->vf_link_vars.line_speed = bulletin->link_speed;
+               bp->vf_link_vars.link_report_flags = 0;
+               /* Link is down */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Full DUPLEX */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
+                       __set_bit(BNX2X_LINK_REPORT_FD,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Rx Flow Control is ON */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
+                       __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Tx Flow Control is ON */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
+                       __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                                 &bp->vf_link_vars.link_report_flags);
+               __bnx2x_link_report(bp);
        }
 
-       /* the vlan in bulletin board is valid and is new */
-       if (bulletin.valid_bitmap & 1 << VLAN_VALID)
-               memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
-
        /* copy new bulletin board to bp */
-       bp->old_bulletin = bulletin;
+       memcpy(&bp->old_bulletin, bulletin,
+              sizeof(struct pf_vf_bulletin_content));
 
        return PFVF_BULLETIN_UPDATED;
 }
@@ -2948,6 +3096,8 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
        if (!bp->pf2vf_bulletin)
                goto alloc_mem_err;
 
+       bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
+
        return 0;
 
 alloc_mem_err:
index 96c575e147a5b14da2b67e9ea25a044a4a882d07..ca1055f3d8afda852f10412d2f551cc05b257848 100644 (file)
@@ -126,7 +126,11 @@ struct bnx2x_virtf {
 #define VF_CACHE_LINE          0x0010
 #define VF_CFG_VLAN            0x0020
 #define VF_CFG_STATS_COALESCE  0x0040
-
+#define VF_CFG_EXT_BULLETIN    0x0080
+       u8 link_cfg;            /* IFLA_VF_LINK_STATE_AUTO
+                                * IFLA_VF_LINK_STATE_ENABLE
+                                * IFLA_VF_LINK_STATE_DISABLE
+                                */
        u8 state;
 #define VF_FREE                0       /* VF ready to be acquired holds no resc */
 #define VF_ACQUIRED    1       /* VF acquired, but not initialized */
@@ -295,22 +299,22 @@ struct bnx2x_vfdb {
 #define BP_VFDB(bp)            ((bp)->vfdb)
        /* vf array */
        struct bnx2x_virtf      *vfs;
-#define BP_VF(bp, idx)         (&((bp)->vfdb->vfs[(idx)]))
-#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+#define BP_VF(bp, idx)         (&((bp)->vfdb->vfs[idx]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
 
        /* queue array - for all vfs */
        struct bnx2x_vf_queue *vfqs;
 
        /* vf HW contexts */
        struct hw_dma           context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
-#define        BP_VF_CXT_PAGE(bp, i)   (&(bp)->vfdb->context[(i)])
+#define        BP_VF_CXT_PAGE(bp, i)   (&(bp)->vfdb->context[i])
 
        /* SR-IOV information */
        struct bnx2x_sriov      sriov;
        struct hw_dma           mbx_dma;
 #define BP_VF_MBX_DMA(bp)      (&((bp)->vfdb->mbx_dma))
        struct bnx2x_vf_mbx     mbxs[BNX2X_MAX_NUM_OF_VFS];
-#define BP_VF_MBX(bp, vfid)    (&((bp)->vfdb->mbxs[(vfid)]))
+#define BP_VF_MBX(bp, vfid)    (&((bp)->vfdb->mbxs[vfid]))
 
        struct hw_dma           bulletin_dma;
 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
@@ -336,6 +340,9 @@ struct bnx2x_vfdb {
        /* sp_rtnl synchronization */
        struct mutex                    event_mutex;
        u64                             event_occur;
+
+       /* bulletin board update synchronization */
+       struct mutex                    bulletin_mutex;
 };
 
 /* queue access */
@@ -467,9 +474,10 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
 
 bool bnx2x_tlv_supported(u16 tlvtype);
 
-u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
-                         struct pf_vf_bulletin_content *bulletin);
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin);
 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                               bool support_long);
 
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 
@@ -520,6 +528,11 @@ void bnx2x_iov_task(struct work_struct *work);
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
 
+void bnx2x_iov_link_update(struct bnx2x *bp);
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+
 #else /* CONFIG_BNX2X_SRIOV */
 
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
@@ -579,6 +592,14 @@ static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
 
 static inline void bnx2x_iov_task(struct work_struct *work) {}
 static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+static inline void bnx2x_iov_link_update(struct bnx2x *bp) {}
+static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; }
+
+static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf,
+                                         int link_state) {return 0; }
+struct pf_vf_bulletin_content;
+static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                                             bool support_long) {}
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index d712d0ddd719bd4dd3a37b25736dfdccf156084c..54e0427a9ee601b2a5e499a01c1e05218ffde368 100644 (file)
@@ -251,6 +251,9 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
                      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
 
+       /* Bulletin support for bulletin board with length > legacy length */
+       req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+
        /* add list termination tlv */
        bnx2x_add_tlv(bp, req,
                      req->first_tlv.tl.length + sizeof(struct channel_tlv),
@@ -1232,6 +1235,41 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
 }
 
+static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
+                                      struct vfpf_acquire_tlv *acquire)
+{
+       /* Windows driver does one of three things:
+        * 1. Old driver doesn't have bulletin board address set.
+        * 2. 'Middle' driver sends mc_num == 32.
+        * 3. New driver sets the OS field.
+        */
+       if (!acquire->bulletin_addr ||
+           acquire->resc_request.num_mc_filters == 32 ||
+           ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
+            VF_OS_WINDOWS))
+               return true;
+
+       return false;
+}
+
+static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf,
+                                        struct bnx2x_vf_mbx *mbx)
+{
+       /* Linux drivers which correctly set the doorbell size also
+        * send a physical port request
+        */
+       if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+                                 CHANNEL_TLV_PHYS_PORT_ID))
+               return 0;
+
+       /* Issue does not exist in windows VMs */
+       if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+               return 0;
+
+       return -EOPNOTSUPP;
+}
+
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
 {
@@ -1247,12 +1285,32 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
           acquire->resc_request.num_vlan_filters,
           acquire->resc_request.num_mc_filters);
 
+       /* Prevent VFs with old drivers from loading, since they calculate
+        * CIDs incorrectly requiring a VF-flr [VM reboot] in order to recover
+        * while being upgraded.
+        */
+       rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
+       if (rc) {
+               DP(BNX2X_MSG_IOV,
+                  "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
+                  vf->abs_vfid);
+               goto out;
+       }
+
        /* acquire the resources */
        rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
 
        /* store address of vf's bulletin board */
        vf->bulletin_map = acquire->bulletin_addr;
+       if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
+               DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
+                  vf->abs_vfid);
+               vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
+       } else {
+               vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
+       }
 
+out:
        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
 }
@@ -1273,6 +1331,10 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
        if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
                vf->cfg_flags |= VF_CFG_STATS_COALESCE;
 
+       /* Update VF's view of link state */
+       if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
+               bnx2x_iov_link_update_vf(bp, vf->index);
+
        /* response */
        bnx2x_vf_mbx_resp(bp, vf, rc);
 }
@@ -2007,6 +2069,17 @@ void bnx2x_vf_mbx(struct bnx2x *bp)
        }
 }
 
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                               bool support_long)
+{
+       /* Older VFs contain a bug where they can't check CRC for bulletin
+        * boards of length greater than legacy size.
+        */
+       bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
+                                         BULLETIN_CONTENT_LEGACY_SIZE;
+       bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
+}
+
 /* propagate local bulletin board to vf */
 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
 {
@@ -2023,8 +2096,9 @@ int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
 
        /* increment bulletin board version and compute crc */
        bulletin->version++;
-       bulletin->length = BULLETIN_CONTENT_SIZE;
-       bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+       bnx2x_vf_bulletin_finalize(bulletin,
+                                  (bnx2x_vf(bp, vf, cfg_flags) &
+                                   VF_CFG_EXT_BULLETIN) ? true : false);
 
        /* propagate bulletin board via dmae to vm memory */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
index e21e706762c9964ad917e69b7149700a458f74fc..15670c499a206bc8c7954e95eed1d37fb29f55d1 100644 (file)
@@ -65,6 +65,7 @@ struct hw_sb_info {
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST      0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST          0x00000010
 #define BULLETIN_CONTENT_SIZE          (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_CONTENT_LEGACY_SIZE   (32)
 #define BULLETIN_ATTEMPTS      5 /* crc failures before throwing towel */
 #define BULLETIN_CRC_SEED      0
 
@@ -117,7 +118,15 @@ struct vfpf_acquire_tlv {
                /* the following fields are for debug purposes */
                u8  vf_id;              /* ME register value */
                u8  vf_os;              /* e.g. Linux, W2K8 */
-               u8 padding[2];
+#define VF_OS_SUBVERSION_MASK  (0x1f)
+#define VF_OS_MASK             (0xe0)
+#define VF_OS_SHIFT            (5)
+#define VF_OS_UNDEFINED                (0 << VF_OS_SHIFT)
+#define VF_OS_WINDOWS          (1 << VF_OS_SHIFT)
+
+               u8 padding;
+               u8 caps;
+#define VF_CAP_SUPPORT_EXT_BULLETIN    (1 << 0)
        } vfdev_info;
 
        struct vf_pf_resc_request resc_request;
@@ -393,11 +402,23 @@ struct pf_vf_bulletin_content {
                                         * to attempt to send messages on the
                                         * channel after this bit is set
                                         */
+#define LINK_VALID             3       /* alert the VF thet a new link status
+                                        * update is available for it
+                                        */
        u8 mac[ETH_ALEN];
        u8 mac_padding[2];
 
        u16 vlan;
        u8 vlan_padding[6];
+
+       u16 link_speed;                  /* Effective line speed */
+       u8 link_speed_padding[6];
+       u32 link_flags;                  /* VFPF_LINK_REPORT_XXX flags */
+#define VFPF_LINK_REPORT_LINK_DOWN      (1 << 0)
+#define VFPF_LINK_REPORT_FULL_DUPLEX    (1 << 1)
+#define VFPF_LINK_REPORT_RX_FC_ON       (1 << 2)
+#define VFPF_LINK_REPORT_TX_FC_ON       (1 << 3)
+       u8 link_flags_padding[4];
 };
 
 union pf_vf_bulletin {
index 8244e2b14bb44ccef6b14f13dae25ea717f03a74..27861a6c7ca55966048b4cb94b0b92a0dde1d269 100644 (file)
@@ -1,13 +1,15 @@
-/* cnic.c: Broadcom CNIC core network driver.
+/* cnic.c: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
- * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
+ * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define CNIC_MODULE_NAME       "cnic"
 
 static char version[] =
-       "Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+       "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
              "Chen (zongxi@broadcom.com");
-MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CNIC_MODULE_VERSION);
 
index d535ae4228b4ccb12d9df6e24f7fed6911e9a6da..4baea81bae7a35dd43cde54a07eb25d0ae40883e 100644 (file)
@@ -1,6 +1,7 @@
-/* cnic.h: Broadcom CNIC core network driver.
+/* cnic.h: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index dcbca6997e8fbcb9d4f38f12acbf8718846bdfbd..b384997740717996485ed2ab6f8c882bba438d8d 100644 (file)
@@ -1,7 +1,8 @@
 
-/* cnic.c: Broadcom CNIC core network driver.
+/* cnic.c: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 5f4d5573a73dbb8252d34ec6419750c3ec06eee1..8bb36c1c4d68c472a366744224a6781880654396 100644 (file)
@@ -1,6 +1,7 @@
-/* cnic_if.h: Broadcom CNIC core network driver.
+/* cnic_if.h: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 31f55a90a19736a768b17513f3da9eeb5d3332c3..9b6885efa9e726ed453395302aa0e0f6caa73b26 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BCMGENET) += genet.o
-genet-objs := bcmgenet.o bcmmii.o
+genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o
index 16281ad2da12c04ee8324ec85835b541479788c5..0173a6d355aa801a99d825cdc11e71efaf4b618c 100644 (file)
@@ -730,6 +730,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = bcmgenet_get_msglevel,
        .set_msglevel           = bcmgenet_set_msglevel,
+       .get_wol                = bcmgenet_get_wol,
+       .set_wol                = bcmgenet_set_wol,
 };
 
 /* Power down the unimac, based on mode. */
@@ -743,6 +745,10 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
                phy_detach(priv->phydev);
                break;
 
+       case GENET_POWER_WOL_MAGIC:
+               bcmgenet_wol_power_down_cfg(priv, mode);
+               break;
+
        case GENET_POWER_PASSIVE:
                /* Power down LED */
                bcmgenet_mii_reset(priv->dev);
@@ -777,6 +783,9 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
                /* enable APD */
                reg |= EXT_PWR_DN_EN_LD;
                break;
+       case GENET_POWER_WOL_MAGIC:
+               bcmgenet_wol_power_up_cfg(priv, mode);
+               return;
        default:
                break;
        }
@@ -1437,6 +1446,25 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
        }
 }
 
+/* Set or clear @mask bits (e.g. CMD_TX_EN | CMD_RX_EN) in the UniMAC
+ * command register, enabling or disabling the corresponding MAC paths.
+ */
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask,
+                               bool enable)
+{
+       u32 reg;
+
+       /* read-modify-write of UMAC_CMD */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (enable)
+               reg |= mask;
+       else
+               reg &= ~mask;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       /* UniMAC stops on a packet boundary, wait for a full-size packet
+        * to be processed
+        */
+       if (enable == 0)
+               usleep_range(1000, 2000);
+}
+
 static int reset_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1469,6 +1497,17 @@ static int reset_umac(struct bcmgenet_priv *priv)
        return 0;
 }
 
+/* Mask and acknowledge all interrupts on both INTRL2 controllers so no
+ * further interrupts are delivered (used at init and on interface stop).
+ */
+static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+{
+       /* Mask all interrupts.*/
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+}
+
 static int init_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1497,10 +1536,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
                bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
 
-       /* Mask all interrupts.*/
-       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
-       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intr_disable(priv);
 
        cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
 
@@ -1802,6 +1838,13 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
+       if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
+               priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+               netif_dbg(priv, wol, priv->dev,
+                         "magic packet detected, waking up\n");
+               bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+       }
+
        /* Link UP/DOWN event */
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
                (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
@@ -1891,6 +1934,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+/* Dedicated Wake-on-LAN interrupt handler: reports a wakeup event to the
+ * PM core so the system resumes; the actual WoL state is handled elsewhere.
+ */
+static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
+{
+       struct bcmgenet_priv *priv = dev_id;
+
+       pm_wakeup_event(&priv->pdev->dev, 0);
+
+       return IRQ_HANDLED;
+}
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
        u32 reg;
@@ -1915,14 +1967,8 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
 
 static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
 {
-       int ret;
-
        /* From WOL-enabled suspend, switch to regular clock */
-       clk_disable(priv->clk_wol);
-       /* init umac registers to synchronize s/w with h/w */
-       ret = init_umac(priv);
-       if (ret)
-               return ret;
+       clk_disable_unprepare(priv->clk_wol);
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
@@ -1967,6 +2013,23 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+/* Bring the network interface up: enable NAPI, the UniMAC TX/RX paths,
+ * power up an internal PHY if present, then start the TX queues and PHY.
+ * Shared by the open and resume paths.
+ */
+static void bcmgenet_netif_start(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Start the network engine */
+       napi_enable(&priv->napi);
+
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+       netif_tx_start_all_queues(dev);
+
+       phy_start(priv->phydev);
+}
+
 static int bcmgenet_open(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1988,18 +2051,14 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_clk_disable;
 
        /* disable ethernet MAC while updating its registers */
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+       /* Make sure we reflect the value of CRC_CMD_FWD */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~(CMD_TX_EN | CMD_RX_EN);
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (priv->wol_enabled) {
-               ret = bcmgenet_wol_resume(priv);
-               if (ret)
-                       return ret;
-       }
-
        if (phy_is_internal(priv->phydev)) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
@@ -2033,24 +2092,7 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
-       /* Start the network engine */
-       napi_enable(&priv->napi);
-
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg |= (CMD_TX_EN | CMD_RX_EN);
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       /* Make sure we reflect the value of CRC_CMD_FWD */
-       priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
-
-       device_set_wakeup_capable(&dev->dev, 1);
-
-       if (phy_is_internal(priv->phydev))
-               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
-       netif_tx_start_all_queues(dev);
-
-       phy_start(priv->phydev);
+       bcmgenet_netif_start(dev);
 
        return 0;
 
@@ -2117,33 +2159,40 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        return ret;
 }
 
+/* Quiesce the network interface: stop TX queues, NAPI and the PHY, mask
+ * all interrupts, and flush any pending IRQ work. Shared by the close and
+ * suspend paths.
+ */
+static void bcmgenet_netif_stop(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       netif_tx_stop_all_queues(dev);
+       napi_disable(&priv->napi);
+       phy_stop(priv->phydev);
+
+       bcmgenet_intr_disable(priv);
+
+       /* Wait for pending work items to complete. Since interrupts are
+        * disabled no new work will be scheduled.
+        */
+       cancel_work_sync(&priv->bcmgenet_irq_work);
+}
+
 static int bcmgenet_close(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int ret;
-       u32 reg;
 
        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
 
-       phy_stop(priv->phydev);
+       bcmgenet_netif_stop(dev);
 
        /* Disable MAC receive */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_RX_EN;
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       netif_tx_stop_all_queues(dev);
+       umac_enable_set(priv, CMD_RX_EN, false);
 
        ret = bcmgenet_dma_teardown(priv);
        if (ret)
                return ret;
 
        /* Disable MAC transmit. TX DMA disabled have to done before this */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_TX_EN;
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       napi_disable(&priv->napi);
+       umac_enable_set(priv, CMD_TX_EN, false);
 
        /* tx reclaim */
        bcmgenet_tx_reclaim_all(dev);
@@ -2152,18 +2201,9 @@ static int bcmgenet_close(struct net_device *dev)
        free_irq(priv->irq0, priv);
        free_irq(priv->irq1, priv);
 
-       /* Wait for pending work items to complete - we are stopping
-        * the clock now. Since interrupts are disabled, no new work
-        * will be scheduled.
-        */
-       cancel_work_sync(&priv->bcmgenet_irq_work);
-
        if (phy_is_internal(priv->phydev))
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-       if (priv->wol_enabled)
-               clk_enable(priv->clk_wol);
-
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
@@ -2450,6 +2490,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
+       priv->wol_irq = platform_get_irq(pdev, 2);
        if (!priv->irq0 || !priv->irq1) {
                dev_err(&pdev->dev, "can't find IRQs\n");
                err = -EINVAL;
@@ -2484,6 +2525,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
 
+       /* Request the WOL interrupt and advertise suspend if available */
+       priv->wol_irq_disabled = true;
+       err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
+                              dev->name, priv);
+       if (!err)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* Set the needed headroom to account for any possible
         * features enabling/disabling at runtime
         */
@@ -2561,6 +2609,111 @@ static int bcmgenet_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+/* System suspend callback: tear down the interface and DMA, optionally arm
+ * magic-packet Wake-on-LAN on the slow WoL clock, then gate the main clock.
+ * Returns 0 on success or a negative errno from DMA teardown.
+ */
+static int bcmgenet_suspend(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int ret;
+
+       /* Nothing to do if the interface was never brought up */
+       if (!netif_running(dev))
+               return 0;
+
+       bcmgenet_netif_stop(dev);
+
+       netif_device_detach(dev);
+
+       /* Disable MAC receive */
+       umac_enable_set(priv, CMD_RX_EN, false);
+
+       ret = bcmgenet_dma_teardown(priv);
+       if (ret)
+               return ret;
+
+       /* Disable MAC transmit. TX DMA disabled have to done before this */
+       umac_enable_set(priv, CMD_TX_EN, false);
+
+       /* tx reclaim */
+       bcmgenet_tx_reclaim_all(dev);
+       bcmgenet_fini_dma(priv);
+
+       /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+       if (device_may_wakeup(d) && priv->wolopts) {
+               bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+               clk_prepare_enable(priv->clk_wol);
+       }
+
+       /* Turn off the clocks */
+       clk_disable_unprepare(priv->clk);
+
+       return 0;
+}
+
+/* System resume callback: re-enable the main clock, reset and reprogram the
+ * UniMAC, leave WoL mode if it was armed, rebuild the DMA rings and restart
+ * the interface. On any failure the clock is released and the error code is
+ * returned.
+ */
+static int bcmgenet_resume(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned long dma_ctrl;
+       int ret;
+       u32 reg;
+
+       /* Nothing to do if the interface was never brought up */
+       if (!netif_running(dev))
+               return 0;
+
+       /* Turn on the clock */
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
+
+       bcmgenet_umac_reset(priv);
+
+       ret = init_umac(priv);
+       if (ret)
+               goto out_clk_disable;
+
+       /* Switch back from the slow WoL clock if WoL was armed at suspend */
+       if (priv->wolopts)
+               ret = bcmgenet_wol_resume(priv);
+
+       if (ret)
+               goto out_clk_disable;
+
+       /* disable ethernet MAC while updating its registers */
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+       bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+       if (phy_is_internal(priv->phydev)) {
+               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+               reg |= EXT_ENERGY_DET_MASK;
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       }
+
+       /* Disable RX/TX DMA and flush TX queues */
+       dma_ctrl = bcmgenet_dma_disable(priv);
+
+       /* Reinitialize TDMA and RDMA and SW housekeeping */
+       ret = bcmgenet_init_dma(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize DMA\n");
+               goto out_clk_disable;
+       }
+
+       /* Always enable ring 16 - descriptor ring */
+       bcmgenet_enable_dma(priv, dma_ctrl);
+
+       netif_device_attach(dev);
+
+       bcmgenet_netif_start(dev);
+
+       return 0;
+
+out_clk_disable:
+       clk_disable_unprepare(priv->clk);
+       return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
 
 static struct platform_driver bcmgenet_driver = {
        .probe  = bcmgenet_probe,
@@ -2569,6 +2722,7 @@ static struct platform_driver bcmgenet_driver = {
                .name   = "bcmgenet",
                .owner  = THIS_MODULE,
                .of_match_table = bcmgenet_match,
+               .pm     = &bcmgenet_pm_ops,
        },
 };
 module_platform_driver(bcmgenet_driver);
index e23c993b13625bca2af3addbde66609c2f3202f0..c61cd98b662ecf3dd590da8f9af3d19c5425db37 100644 (file)
@@ -456,6 +456,7 @@ struct enet_cb {
 enum bcmgenet_power_mode {
        GENET_POWER_CABLE_SENSE = 0,
        GENET_POWER_PASSIVE,
+       GENET_POWER_WOL_MAGIC,
 };
 
 struct bcmgenet_priv;
@@ -569,6 +570,8 @@ struct bcmgenet_priv {
        int irq1;
        unsigned int irq0_stat;
        unsigned int irq1_stat;
+       int wol_irq;
+       bool wol_irq_disabled;
 
        /* HW descriptors/checksum variables */
        bool desc_64b_en;
@@ -583,7 +586,6 @@ struct bcmgenet_priv {
        struct platform_device *pdev;
 
        /* WOL */
-       unsigned long wol_enabled;
        struct clk *clk_wol;
        u32 wolopts;
 
@@ -625,4 +627,12 @@ int bcmgenet_mii_config(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
 
+/* Wake-on-LAN routines */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode);
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+                              enum bcmgenet_power_mode mode);
+
 #endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
new file mode 100644 (file)
index 0000000..b82b7e4
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)                            "bcmgenet_wol: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include "bcmgenet.h"
+
+/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet
+ * Detection is supported through ethtool
+ */
+/* ethtool function - get WOL (Wake on LAN) settings, Only Magic Packet
+ * Detection is supported through ethtool
+ */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+       wol->wolopts = priv->wolopts;
+       memset(wol->sopass, 0, sizeof(wol->sopass));
+
+       /* Read back the 48-bit SecureOn password from the two UMAC
+        * password registers (16-bit MS half, 32-bit LS half).
+        */
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
+               put_unaligned_be16(reg, &wol->sopass[0]);
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
+               put_unaligned_be32(reg, &wol->sopass[2]);
+       }
+}
+
+/* ethtool function - set WOL (Wake on LAN) settings.
+ * Only for magic packet detection mode.
+ */
+/* ethtool function - set WOL (Wake on LAN) settings.
+ * Only for magic packet detection mode.
+ * Returns 0 on success, -ENOTSUPP if the device cannot wake the system,
+ * or -EINVAL for unsupported wolopts flags.
+ */
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       u32 reg;
+
+       if (!device_can_wakeup(kdev))
+               return -ENOTSUPP;
+
+       /* Only magic-packet modes are supported */
+       if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+               return -EINVAL;
+
+       /* Program the SecureOn password and enable password matching */
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+                                    UMAC_MPD_PW_MS);
+               bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+                                    UMAC_MPD_PW_LS);
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+               reg |= MPD_PW_EN;
+               bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+       }
+
+       /* Flag the device and relevant IRQ as wakeup capable */
+       if (wol->wolopts) {
+               device_set_wakeup_enable(kdev, 1);
+               enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = false;
+       } else {
+               device_set_wakeup_enable(kdev, 0);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = true;
+       }
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
+
+/* Poll the RX buffer status register until the hardware reports WoL-ready,
+ * waiting 1 ms between reads. Returns the number of retries taken (>= 0)
+ * on success, or -ETIMEDOUT after 5 unsuccessful polls.
+ */
+static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
+{
+       struct net_device *dev = priv->dev;
+       int retries = 0;
+
+       while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS)
+               & RBUF_STATUS_WOL)) {
+               retries++;
+               if (retries > 5) {
+                       netdev_crit(dev, "polling wol mode timeout\n");
+                       return -ETIMEDOUT;
+               }
+               mdelay(1);
+       }
+
+       return retries;
+}
+
+/* Configure the hardware for magic-packet Wake-on-LAN before suspend:
+ * briefly stop RX, enable magic-packet detection, wait for the WoL-ready
+ * status, then re-enable RX with CRC forwarding so MP frames are matched.
+ * Returns 0 on success, -EINVAL for any mode other than
+ * GENET_POWER_WOL_MAGIC, or a negative errno if WoL-ready never asserts
+ * (in which case MPD is rolled back).
+ */
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode)
+{
+       struct net_device *dev = priv->dev;
+       u32 cpu_mask_clear;
+       int retries = 0;
+       u32 reg;
+
+       if (mode != GENET_POWER_WOL_MAGIC) {
+               netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
+               return -EINVAL;
+       }
+
+       /* disable RX */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       mdelay(10);
+
+       /* Enable magic packet detection */
+       reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+       reg |= MPD_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Do not leave UniMAC in MPD mode only */
+       retries = bcmgenet_poll_wol_status(priv);
+       if (retries < 0) {
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+               reg &= ~MPD_EN;
+               bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+               return retries;
+       }
+
+       netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
+                 retries);
+
+       /* Enable CRC forward */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       priv->crc_fwd_en = 1;
+       reg |= CMD_CRC_FWD;
+
+       /* Receiver must be enabled for WOL MP detection */
+       reg |= CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       /* Clear energy detect so it cannot interfere with wake */
+       if (priv->hw_params->flags & GENET_HAS_EXT) {
+               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+               reg &= ~EXT_ENERGY_DET_MASK;
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       }
+
+       /* Enable the MPD interrupt */
+       cpu_mask_clear = UMAC_IRQ_MPD_R;
+
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+
+       return 0;
+}
+
+/* Undo the magic-packet Wake-on-LAN configuration on resume: disable
+ * magic-packet detection and CRC forwarding, and mask the MPD interrupt.
+ * Only GENET_POWER_WOL_MAGIC is a valid mode; others are logged and ignored.
+ */
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+                              enum bcmgenet_power_mode mode)
+{
+       u32 cpu_mask_set;
+       u32 reg;
+
+       if (mode != GENET_POWER_WOL_MAGIC) {
+               netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
+               return;
+       }
+
+       /* Disable magic packet detection */
+       reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Disable CRC Forward */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_CRC_FWD;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       priv->crc_fwd_en = 0;
+
+       /* Stop monitoring magic packet IRQ */
+       cpu_mask_set = UMAC_IRQ_MPD_R;
+
+       /* Mask the MPD interrupt in the INTRL2 controller */
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
+}
index add8d8596084054ca1e059a360be4a1d24501122..b1338c9e8abb8afb96748ea18fe3d2ba49f9e12c 100644 (file)
@@ -136,17 +136,18 @@ static void bcmgenet_mii_setup(struct net_device *dev)
                /* pause capability */
                if (!phydev->pause)
                        cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+       }
 
+       if (status_changed) {
                reg = bcmgenet_umac_readl(priv, UMAC_CMD);
                reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
                               CMD_HD_EN |
                               CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-       }
 
-       if (status_changed)
                phy_print_status(phydev);
+       }
 }
 
 void bcmgenet_mii_reset(struct net_device *dev)
index 6a68e8d93309ab085bae60dd2dc5531ce1f03765..6f72771caea66a6de0d3380b6edcc83cccbd7fbf 100644 (file)
@@ -68,10 +68,8 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
        switch (asic_gen) {
        case BFI_ASIC_GEN_CT:
                return (bfi_image_ct_cna + off);
-               break;
        case BFI_ASIC_GEN_CT2:
                return (bfi_image_ct2_cna + off);
-               break;
        default:
                return NULL;
        }
@@ -83,10 +81,8 @@ bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
        switch (asic_gen) {
        case BFI_ASIC_GEN_CT:
                return bfi_image_ct_cna_size;
-               break;
        case BFI_ASIC_GEN_CT2:
                return bfi_image_ct2_cna_size;
-               break;
        default:
                return 0;
        }
index 570222c3341070445b3b0c224205872f286a3cfd..c3ce9df0041a1c65351619663134ff7e142e965e 100644 (file)
@@ -86,6 +86,17 @@ config CHELSIO_T4
          To compile this driver as a module choose M here; the module
          will be called cxgb4.
 
+config CHELSIO_T4_DCB
+       bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards"
+       default n
+       depends on CHELSIO_T4 && DCB
+       ---help---
+         Enable DCB support through rtNetlink interface.
+         Say Y here if you want to enable Data Center Bridging (DCB) support
+         in the driver.
+
+         If unsure, say N.
+
 config CHELSIO_T4VF
        tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
        depends on PCI
index 498667487f520a6e273003193b653bb623fd7b9e..1df65c915b995228d7b8d22173541c7f1d4ee4af 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
+cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
index f503dce4ab173ca951a89db27482ac61b1f1d808..46156210df3418fe116a50cde88b07bdb2ea8790 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -85,7 +85,8 @@ enum {
        MEMWIN1_BASE_T5  = 0x52000,
        MEMWIN2_APERTURE = 65536,
        MEMWIN2_BASE     = 0x30000,
-       MEMWIN2_BASE_T5  = 0x54000,
+       MEMWIN2_APERTURE_T5 = 131072,
+       MEMWIN2_BASE_T5  = 0x60000,
 };
 
 enum dev_master {
@@ -309,6 +310,9 @@ struct adapter_params {
 
        unsigned int ofldq_wr_cred;
        bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */
+
+       unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
+       unsigned int max_ird_adapter;     /* Max read depth per adapter */
 };
 
 #include "t4fw_api.h"
@@ -373,6 +377,8 @@ enum {
 struct adapter;
 struct sge_rspq;
 
+#include "cxgb4_dcb.h"
+
 struct port_info {
        struct adapter *adapter;
        u16    viid;
@@ -389,6 +395,9 @@ struct port_info {
        u8     rss_mode;
        struct link_config link_cfg;
        u16   *rss;
+#ifdef CONFIG_CHELSIO_T4_DCB
+       struct port_dcb_info dcb;     /* Data Center Bridging support */
+#endif
 };
 
 struct dentry;
@@ -603,6 +612,7 @@ struct l2t_data;
 struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
+       u32 t4_bar0;
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned int mbox;
@@ -647,6 +657,7 @@ struct adapter {
        struct dentry *debugfs_root;
 
        spinlock_t stats_lock;
+       spinlock_t win0_lock ____cacheline_aligned_in_smp;
 };
 
 /* Defined bit width of user definable filter tuples
@@ -941,6 +952,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx);
+void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
 
 struct fw_filter_wr;
 
@@ -952,8 +964,17 @@ int t4_wait_dev_ready(struct adapter *adap);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
-int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
-                   __be32 *buf);
+
+#define T4_MEMORY_WRITE        0
+#define T4_MEMORY_READ 1
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
+                __be32 *buf, int dir);
+static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
+                                 u32 len, __be32 *buf)
+{
+       return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
+}
+
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1007,6 +1028,10 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+                         unsigned int pf, unsigned int vf,
+                         unsigned int nparams, const u32 *params,
+                         const u32 *val);
 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1025,6 +1050,8 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt);
 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+                       unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en);
 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
@@ -1045,7 +1072,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
-int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
new file mode 100644 (file)
index 0000000..0d3a9df
--- /dev/null
@@ -0,0 +1,971 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *            Casey Leedom (leedom@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include "cxgb4.h"
+
+/* Initialize a port's Data Center Bridging state.  Typically used after a
+ * Link Down event.
+ */
+void cxgb4_dcb_state_init(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       memset(dcb, 0, sizeof(struct port_dcb_info));
+       dcb->state = CXGB4_DCB_STATE_START;
+}
+
+/* Finite State machine for Data Center Bridging.
+ */
+void cxgb4_dcb_state_fsm(struct net_device *dev,
+                        enum cxgb4_dcb_state_input input)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+       struct adapter *adap = pi->adapter;
+
+       switch (input) {
+       case CXGB4_DCB_INPUT_FW_DISABLED: {
+               /* Firmware tells us it's not doing DCB */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_START: {
+                       /* we're going to use Host DCB */
+                       dcb->state = CXGB4_DCB_STATE_HOST;
+                       dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
+                       dcb->enabled = 1;
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_HOST: {
+                       /* we're already in Host DCB mode */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_ENABLED: {
+               /* Firmware tells us that it is doing DCB */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_START: {
+                       /* we're going to use Firmware DCB */
+                       dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_INCOMPLETE:
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* we're already in firmware DCB mode */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
+               /* Firmware tells us that its DCB state is incomplete */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+                       /* we're already incomplete */
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* We were successfully running with firmware DCB but
+                        * now it's telling us that it's in an "incomplete"
+                        * state.  We need to reset back to a ground state
+                        * of incomplete.
+                        */
+                       cxgb4_dcb_state_init(dev);
+                       dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       linkwatch_fire_event(dev);
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
+               /* Firmware tells us that its DCB state is complete */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+                       dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
+                       dcb->enabled = 1;
+                       linkwatch_fire_event(dev);
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* we're already all sync'ed */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       default:
+               goto  bad_state_input;
+       }
+       return;
+
+bad_state_input:
+       dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n",
+               input);
+       return;
+
+bad_state_transition:
+       dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n",
+               dcb->state, input);
+}
+
+/* Handle a DCB/DCBX update message from the firmware.
+ */
+void cxgb4_dcb_handle_fw_update(struct adapter *adap,
+                               const struct fw_port_cmd *pcmd)
+{
+       const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
+       int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid));
+       struct net_device *dev = adap->port[port];
+       struct port_info *pi = netdev_priv(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+       int dcb_type = pcmd->u.dcb.pgid.type;
+
+       /* Handle Firmware DCB Control messages separately since they drive
+        * our state machine.
+        */
+       if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
+               enum cxgb4_dcb_state_input input =
+                       ((pcmd->u.dcb.control.all_syncd_pkd &
+                         FW_PORT_CMD_ALL_SYNCD)
+                        ? CXGB4_DCB_STATE_FW_ALLSYNCED
+                        : CXGB4_DCB_STATE_FW_INCOMPLETE);
+
+               cxgb4_dcb_state_fsm(dev, input);
+               return;
+       }
+
+       /* It's weird, and almost certainly an error, to get Firmware DCB
+        * messages when we either haven't been told whether we're going to be
+        * doing Host or Firmware DCB; and even worse when we've been told
+        * that we're doing Host DCB!
+        */
+       if (dcb->state == CXGB4_DCB_STATE_START ||
+           dcb->state == CXGB4_DCB_STATE_HOST) {
+               dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n",
+                       dcb->state);
+               return;
+       }
+
+       /* Now handle the general Firmware DCB update messages ...
+        */
+       switch (dcb_type) {
+       case FW_PORT_DCB_TYPE_PGID:
+               dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid);
+               dcb->msgs |= CXGB4_DCB_FW_PGID;
+               break;
+
+       case FW_PORT_DCB_TYPE_PGRATE:
+               dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported;
+               memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate,
+                      sizeof(dcb->pgrate));
+               dcb->msgs |= CXGB4_DCB_FW_PGRATE;
+               break;
+
+       case FW_PORT_DCB_TYPE_PRIORATE:
+               memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate,
+                      sizeof(dcb->priorate));
+               dcb->msgs |= CXGB4_DCB_FW_PRIORATE;
+               break;
+
+       case FW_PORT_DCB_TYPE_PFC:
+               dcb->pfcen = fwdcb->pfc.pfcen;
+               dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs;
+               dcb->msgs |= CXGB4_DCB_FW_PFC;
+               break;
+
+       case FW_PORT_DCB_TYPE_APP_ID: {
+               const struct fw_port_app_priority *fwap = &fwdcb->app_priority;
+               int idx = fwap->idx;
+               struct app_priority *ap = &dcb->app_priority[idx];
+
+               struct dcb_app app = {
+                       .selector = fwap->sel_field,
+                       .protocol = be16_to_cpu(fwap->protocolid),
+                       .priority = fwap->user_prio_map,
+               };
+               int err;
+
+               err = dcb_setapp(dev, &app);
+               if (err)
+                       dev_err(adap->pdev_dev,
+                               "Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n",
+                               app.selector, app.protocol, app.priority, -err);
+
+               ap->user_prio_map = fwap->user_prio_map;
+               ap->sel_field = fwap->sel_field;
+               ap->protocolid = be16_to_cpu(fwap->protocolid);
+               dcb->msgs |= CXGB4_DCB_FW_APP_ID;
+               break;
+       }
+
+       default:
+               dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n",
+                       dcb_type);
+               break;
+       }
+}
+
+/* Data Center Bridging netlink operations.
+ */
+
+
+/* Get current DCB enabled/disabled state.
+ */
+static u8 cxgb4_getstate(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       return pi->dcb.enabled;
+}
+
+/* Set DCB enabled/disabled.
+ */
+static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* Firmware doesn't provide any mechanism to control the DCB state.
+        */
+       if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
+               return 1;
+
+       return 0;
+}
+
+static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
+                            u8 *prio_type, u8 *pgid, u8 *bw_per,
+                            u8 *up_tc_map, int local)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       *prio_type = *pgid = *bw_per = *up_tc_map = 0;
+
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return;
+       }
+       *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       *bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid];
+       *up_tc_map = (1 << tc);
+
+       /* prio_type is link strict */
+       *prio_type = 0x2;
+}
+
+static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
+                               u8 *prio_type, u8 *pgid, u8 *bw_per,
+                               u8 *up_tc_map)
+{
+       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+}
+
+
+static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
+                               u8 *prio_type, u8 *pgid, u8 *bw_per,
+                               u8 *up_tc_map)
+{
+       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+}
+
+static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
+                               u8 prio_type, u8 pgid, u8 bw_per,
+                               u8 up_tc_map)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       u32 _pgid;
+       int err;
+
+       if (pgid == DCB_ATTR_VALUE_UNDEFINED)
+               return;
+       if (bw_per == DCB_ATTR_VALUE_UNDEFINED)
+               return;
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return;
+       }
+
+       _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+       _pgid &= ~(0xF << (tc * 4));
+       _pgid |= pgid << (tc * 4);
+       pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n",
+                       -err);
+               return;
+       }
+
+       memset(&pcmd, 0, sizeof(struct fw_port_cmd));
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS)
+               dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+                       -err);
+}
+
+static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per,
+                             int local)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+       } else {
+               *bw_per = pcmd.u.dcb.pgrate.pgrate[pgid];
+       }
+}
+
+static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+       return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1);
+}
+
+static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+       return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0);
+}
+
+static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
+                                u8 bw_per)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+       if (err != FW_PORT_DCB_CFG_SUCCESS)
+               dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+                       -err);
+}
+
+/* Return whether the specified Traffic Class Priority has Priority Pause
+ * Frames enabled.
+ */
+static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+           priority >= CXGB4_MAX_PRIORITY)
+               *pfccfg = 0;
+       else
+               *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+}
+
+/* Enable/disable Priority Pause Frames for the specified Traffic Class
+ * Priority.
+ */
+static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+           priority >= CXGB4_MAX_PRIORITY)
+               return;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
+       pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
+
+       if (pfccfg)
+               pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+       else
+               pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err);
+               return;
+       }
+
+       pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen;
+}
+
+static u8 cxgb4_setall(struct net_device *dev)
+{
+       return 0;
+}
+
+/* Return DCB capabilities.
+ */
+static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       switch (cap_id) {
+       case DCB_CAP_ATTR_PG:
+       case DCB_CAP_ATTR_PFC:
+               *caps = true;
+               break;
+
+       case DCB_CAP_ATTR_PG_TCS:
+               /* 8 priorities for PG represented by bitmap */
+               *caps = 0x80;
+               break;
+
+       case DCB_CAP_ATTR_PFC_TCS:
+               /* 8 priorities for PFC represented by bitmap */
+               *caps = 0x80;
+               break;
+
+       case DCB_CAP_ATTR_GSP:
+               *caps = true;
+               break;
+
+       case DCB_CAP_ATTR_UP2TC:
+       case DCB_CAP_ATTR_BCN:
+               *caps = false;
+               break;
+
+       case DCB_CAP_ATTR_DCBX:
+               *caps = pi->dcb.supported;
+               break;
+
+       default:
+               *caps = false;
+       }
+
+       return 0;
+}
+
+/* Return the number of Traffic Classes for the indicated Traffic Class ID.
+ */
+static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       switch (tcs_id) {
+       case DCB_NUMTCS_ATTR_PG:
+               if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE)
+                       *num = pi->dcb.pg_num_tcs_supported;
+               else
+                       *num = 0x8;
+               break;
+
+       case DCB_NUMTCS_ATTR_PFC:
+               *num = 0x8;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Set the number of Traffic Classes supported for the indicated Traffic Class
+ * ID.
+ */
+static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num)
+{
+       /* Setting the number of Traffic Classes isn't supported.
+        */
+       return -ENOSYS;
+}
+
+/* Return whether Priority Flow Control is enabled.  */
+static u8 cxgb4_getpfcstate(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return false;
+
+       return pi->dcb.pfcen != 0;
+}
+
+/* Enable/disable Priority Flow Control. */
+static void cxgb4_setpfcstate(struct net_device *dev, u8 state)
+{
+       /* We can't enable/disable Priority Flow Control but we also can't
+        * return an error ...
+        */
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+                         int peer)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 0;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               struct fw_port_cmd pcmd;
+               int err;
+
+               if (peer)
+                       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               else
+                       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB APP read failed with %d\n",
+                               -err);
+                       return err;
+               }
+               if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id)
+                       return pcmd.u.dcb.app_priority.user_prio_map;
+
+               /* exhausted app list */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+
+       return -EEXIST;
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
+{
+       return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+}
+
+/* Write a new Application User Priority Map for the specified Application ID
+ */
+static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+                       u8 app_prio)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err;
+
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return -EINVAL;
+
+       /* DCB info gets thrown away on link up */
+       if (!netif_carrier_ok(dev))
+               return -ENOLINK;
+
+       if (app_idtype != DCB_APP_IDTYPE_ETHTYPE &&
+           app_idtype != DCB_APP_IDTYPE_PORTNUM)
+               return -EINVAL;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+               if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) {
+                       /* overwrite existing app table */
+                       pcmd.u.dcb.app_priority.protocolid = 0;
+                       break;
+               }
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+
+       if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) {
+               /* no empty slots available */
+               dev_err(adap->pdev_dev, "DCB app table full\n");
+               return -EBUSY;
+       }
+
+       /* write out new app table entry */
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+       pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
+       pcmd.u.dcb.app_priority.sel_field = app_idtype;
+       pcmd.u.dcb.app_priority.user_prio_map = app_prio;
+       pcmd.u.dcb.app_priority.idx = i;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB app table write failed with %d\n",
+                       -err);
+               return err;
+       }
+
+       return 0;
+}
+
+/* Return whether IEEE Data Center Bridging has been negotiated.
+ */
+static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+               (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
+}
+
+/* Fill in the Application User Priority Map associated with the
+ * specified Application.
+ */
+static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
+{
+       int prio;
+
+       if (!cxgb4_ieee_negotiation_complete(dev))
+               return -EINVAL;
+       if (!(app->selector && app->protocol))
+               return -EINVAL;
+
+       prio = dcb_getapp(dev, app);
+       if (prio == 0) {
+               /* If app doesn't exist in dcb_app table, try firmware
+                * directly.
+                */
+               prio = __cxgb4_getapp(dev, app->selector, app->protocol, 0);
+       }
+
+       app->priority = prio;
+       return 0;
+}
+
+/* Write a new Application User Priority Map for the specified App id. */
+static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+       if (!cxgb4_ieee_negotiation_complete(dev))
+               return -EINVAL;
+       if (!(app->selector && app->protocol && app->priority))
+               return -EINVAL;
+
+       cxgb4_setapp(dev, app->selector, app->protocol, app->priority);
+       return dcb_setapp(dev, app);
+}
+
+/* Return our DCBX parameters.
+ */
+static u8 cxgb4_getdcbx(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* This is already set by cxgb4_set_dcb_caps, so just return it */
+       return pi->dcb.supported;
+}
+
+/* Set our DCBX parameters.
+ */
+static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* Filter out requests which exceed our capabilities.
+        */
+       if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT))
+           != dcb_request)
+               return 1;
+
+       /* Can't set DCBX capabilities if DCBX isn't enabled. */
+       if (!pi->dcb.state)
+               return 1;
+
+       /* There's currently no mechanism to allow for the firmware DCBX
+        * negotiation to be changed from the Host Driver.  If the caller
+        * requests exactly the same parameters that we already have then
+        * we'll allow them to be successfully "set" ...
+        */
+       if (dcb_request != pi->dcb.supported)
+               return 1;
+
+       pi->dcb.supported = dcb_request;
+       return 0;
+}
+
+static int cxgb4_getpeer_app(struct net_device *dev,
+                            struct dcb_peer_app_info *info, u16 *app_count)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err = 0;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 1;
+
+       info->willing = 0;
+       info->error = 0;
+
+       *app_count = 0;
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = *app_count;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+       *app_count = i;
+       return err;
+}
+
+static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err = 0;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 1;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+
+               table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+               table[i].protocol =
+                       be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
+               table[i].priority = pcmd.u.dcb.app_priority.user_prio_map;
+       }
+       return err;
+}
+
+/* Return Priority Group information.
+ */
+static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       u32 pgid;
+       int i, err;
+
+       /* We're always "willing" -- the Switch Fabric always dictates the
+        * DCBX parameters to us.
+        */
+       pg->willing = true;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return err;
+       }
+       pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+       for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+               pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return err;
+       }
+
+       for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+               pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+
+       return 0;
+}
+
+/* Return Priority Flow Control information.
+ */
+static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
+       pfc->pfc_en = pi->dcb.pfcen;
+
+       return 0;
+}
+
+const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
+       .ieee_getapp            = cxgb4_ieee_getapp,
+       .ieee_setapp            = cxgb4_ieee_setapp,
+
+       /* CEE std */
+       .getstate               = cxgb4_getstate,
+       .setstate               = cxgb4_setstate,
+       .getpgtccfgtx           = cxgb4_getpgtccfg_tx,
+       .getpgbwgcfgtx          = cxgb4_getpgbwgcfg_tx,
+       .getpgtccfgrx           = cxgb4_getpgtccfg_rx,
+       .getpgbwgcfgrx          = cxgb4_getpgbwgcfg_rx,
+       .setpgtccfgtx           = cxgb4_setpgtccfg_tx,
+       .setpgbwgcfgtx          = cxgb4_setpgbwgcfg_tx,
+       .setpfccfg              = cxgb4_setpfccfg,
+       .getpfccfg              = cxgb4_getpfccfg,
+       .setall                 = cxgb4_setall,
+       .getcap                 = cxgb4_getcap,
+       .getnumtcs              = cxgb4_getnumtcs,
+       .setnumtcs              = cxgb4_setnumtcs,
+       .getpfcstate            = cxgb4_getpfcstate,
+       .setpfcstate            = cxgb4_setpfcstate,
+       .getapp                 = cxgb4_getapp,
+       .setapp                 = cxgb4_setapp,
+
+       /* DCBX configuration */
+       .getdcbx                = cxgb4_getdcbx,
+       .setdcbx                = cxgb4_setdcbx,
+
+       /* peer apps */
+       .peer_getappinfo        = cxgb4_getpeer_app,
+       .peer_getapptable       = cxgb4_getpeerapp_tbl,
+
+       /* CEE peer */
+       .cee_peer_getpg         = cxgb4_cee_peer_getpg,
+       .cee_peer_getpfc        = cxgb4_cee_peer_getpfc,
+};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
new file mode 100644 (file)
index 0000000..1ec1d83
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#ifndef __CXGB4_DCB_H
+#define __CXGB4_DCB_H
+
+#include <linux/netdevice.h>
+#include <linux/dcbnl.h>
+#include <net/dcbnl.h>
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+
+#define CXGB4_DCBX_FW_SUPPORT \
+       (DCB_CAP_DCBX_VER_CEE | \
+        DCB_CAP_DCBX_VER_IEEE | \
+        DCB_CAP_DCBX_LLD_MANAGED)
+#define CXGB4_DCBX_HOST_SUPPORT \
+       (DCB_CAP_DCBX_VER_CEE | \
+        DCB_CAP_DCBX_VER_IEEE | \
+        DCB_CAP_DCBX_HOST)
+
+#define CXGB4_MAX_PRIORITY      CXGB4_MAX_DCBX_APP_SUPPORTED
+#define CXGB4_MAX_TCS           CXGB4_MAX_DCBX_APP_SUPPORTED
+
+#define INIT_PORT_DCB_CMD(__pcmd, __port, __op, __action) \
+       do { \
+               memset(&(__pcmd), 0, sizeof(__pcmd)); \
+               (__pcmd).op_to_portid = \
+                       cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \
+                                   FW_CMD_REQUEST | \
+                                   FW_CMD_##__op | \
+                                   FW_PORT_CMD_PORTID(__port)); \
+               (__pcmd).action_to_len16 = \
+                       cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \
+                                   FW_LEN16(pcmd)); \
+       } while (0)
+
+#define INIT_PORT_DCB_READ_PEER_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_RECV)
+
+#define INIT_PORT_DCB_READ_LOCAL_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_TRANS)
+
+#define INIT_PORT_DCB_READ_SYNC_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_DET)
+
+#define INIT_PORT_DCB_WRITE_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, EXEC, FW_PORT_ACTION_L2_DCB_CFG)
+
+/* States we can be in for a port's Data Center Bridging.
+ */
+enum cxgb4_dcb_state {
+       CXGB4_DCB_STATE_START,          /* initial unknown state */
+       CXGB4_DCB_STATE_HOST,           /* we're using Host DCB (if at all) */
+       CXGB4_DCB_STATE_FW_INCOMPLETE,  /* using firmware DCB, incomplete */
+       CXGB4_DCB_STATE_FW_ALLSYNCED,   /* using firmware DCB, all sync'ed */
+};
+
+/* Data Center Bridging state input for the Finite State Machine.
+ */
+enum cxgb4_dcb_state_input {
+       /* Input from the firmware.
+        */
+       CXGB4_DCB_INPUT_FW_DISABLED,    /* firmware DCB disabled */
+       CXGB4_DCB_INPUT_FW_ENABLED,     /* firmware DCB enabled */
+       CXGB4_DCB_INPUT_FW_INCOMPLETE,  /* firmware reports incomplete DCB */
+       CXGB4_DCB_INPUT_FW_ALLSYNCED,   /* firmware reports all sync'ed */
+
+};
+
+/* Firmware DCB messages that we've received so far ...
+ */
+enum cxgb4_dcb_fw_msgs {
+       CXGB4_DCB_FW_PGID       = 0x01,
+       CXGB4_DCB_FW_PGRATE     = 0x02,
+       CXGB4_DCB_FW_PRIORATE   = 0x04,
+       CXGB4_DCB_FW_PFC        = 0x08,
+       CXGB4_DCB_FW_APP_ID     = 0x10,
+};
+
+#define CXGB4_MAX_DCBX_APP_SUPPORTED 8
+
+/* Data Center Bridging support;
+ */
+struct port_dcb_info {
+       enum cxgb4_dcb_state state;     /* DCB State Machine */
+       enum cxgb4_dcb_fw_msgs msgs;    /* DCB Firmware messages received */
+       unsigned int supported;         /* OS DCB capabilities supported */
+       bool enabled;                   /* OS Enabled state */
+
+       /* Cached copies of DCB information sent by the firmware (in Host
+        * Native Endian format).
+        */
+       u32     pgid;                   /* Priority Group[0..7] */
+       u8      pfcen;                  /* Priority Flow Control[0..7] */
+       u8      pg_num_tcs_supported;   /* max PG Traffic Classes */
+       u8      pfc_num_tcs_supported;  /* max PFC Traffic Classes */
+       u8      pgrate[8];              /* Priority Group Rate[0..7] */
+       u8      priorate[8];            /* Priority Rate[0..7] */
+       struct app_priority { /* Application Information */
+               u8      user_prio_map;  /* Priority Map bitfield */
+               u8      sel_field;      /* Protocol ID interpretation */
+               u16     protocolid;     /* Protocol ID */
+       } app_priority[CXGB4_MAX_DCBX_APP_SUPPORTED];
+};
+
+void cxgb4_dcb_state_init(struct net_device *);
+void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
+void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
+void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
+extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
+
+#define CXGB4_DCB_ENABLED true
+
+#else /* !CONFIG_CHELSIO_T4_DCB */
+
+static inline void cxgb4_dcb_state_init(struct net_device *dev)
+{
+}
+
+#define CXGB4_DCB_ENABLED false
+
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+
+#endif /* __CXGB4_DCB_H */
index a83271cf17c3c44208ea9369600ec94f9a79e8f2..8b46534b06c1350f1273092efa4e163a38df29de 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -67,6 +67,7 @@
 #include "t4_regs.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "cxgb4_dcb.h"
 #include "l2t.h"
 
 #include <../drivers/net/bonding/bonding.h>
@@ -223,6 +224,17 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
+       CH_DEVICE(0x400d, -1),
+       CH_DEVICE(0x400e, -1),
+       CH_DEVICE(0x4080, -1),
+       CH_DEVICE(0x4081, -1),
+       CH_DEVICE(0x4082, -1),
+       CH_DEVICE(0x4083, -1),
+       CH_DEVICE(0x4084, -1),
+       CH_DEVICE(0x4085, -1),
+       CH_DEVICE(0x4086, -1),
+       CH_DEVICE(0x4087, -1),
+       CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
@@ -235,6 +247,15 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
+       CH_DEVICE(0x4480, 4),
+       CH_DEVICE(0x4481, 4),
+       CH_DEVICE(0x4482, 4),
+       CH_DEVICE(0x4483, 4),
+       CH_DEVICE(0x4484, 4),
+       CH_DEVICE(0x4485, 4),
+       CH_DEVICE(0x4486, 4),
+       CH_DEVICE(0x4487, 4),
+       CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
@@ -391,6 +412,17 @@ module_param_array(num_vf, uint, NULL, 0644);
 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 #endif
 
+/* TX Queue select used to determine what algorithm to use for selecting TX
+ * queue. Select between the kernel provided function (select_queue=0) or user
+ * cxgb_select_queue function (select_queue=1)
+ *
+ * Default: select_queue=0
+ */
+static int select_queue;
+module_param(select_queue, int, 0644);
+MODULE_PARM_DESC(select_queue,
+                "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
+
 /*
  * The filter TCAM has a fixed portion and a variable portion.  The fixed
  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
@@ -458,6 +490,42 @@ static void link_report(struct net_device *dev)
        }
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
+static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
+{
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+       int i;
+
+       /* We use a simple mapping of Port TX Queue Index to DCB
+        * Priority when we're enabling DCB.
+        */
+       for (i = 0; i < pi->nqsets; i++, txq++) {
+               u32 name, value;
+               int err;
+
+               name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+                       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+                       FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
+               value = enable ? i : 0xffffffff;
+
+               /* Since we can be called while atomic (from "interrupt
+                * level") we need to issue the Set Parameters Command
+                * without sleeping (timeout < 0).
+                */
+               err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
+                                           &name, &value);
+
+               if (err)
+                       dev_err(adap->pdev_dev,
+                               "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
+                               enable ? "set" : "unset", pi->port_id, i, -err);
+       }
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 {
        struct net_device *dev = adapter->port[port_id];
@@ -466,8 +534,13 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
-               else
+               else {
+#ifdef CONFIG_CHELSIO_T4_DCB
+                       cxgb4_dcb_state_init(dev);
+                       dcb_tx_queue_prio_enable(dev, false);
+#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
+               }
 
                link_report(dev);
        }
@@ -601,10 +674,45 @@ static int link_start(struct net_device *dev)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
-               ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+               ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
+                                         true, CXGB4_DCB_ENABLED);
+
        return ret;
 }
 
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+       struct port_info *pi = netdev_priv(dev);
+
+       return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+#else
+       return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Handle a Data Center Bridging update message from the firmware. */
+static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
+{
+       int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
+       struct net_device *dev = adap->port[port];
+       int old_dcb_enabled = cxgb4_dcb_enabled(dev);
+       int new_dcb_enabled;
+
+       cxgb4_dcb_handle_fw_update(adap, pcmd);
+       new_dcb_enabled = cxgb4_dcb_enabled(dev);
+
+       /* If the DCB has become enabled or disabled on the port then we're
+        * going to need to set up/tear down DCB Priority parameters for the
+        * TX Queues associated with the port.
+        */
+       if (new_dcb_enabled != old_dcb_enabled)
+               dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 /* Clear a filter and release any of its resources that we own.  This also
  * clears the filter's "pending" status.
  */
@@ -709,8 +817,32 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;
 
-               if (p->type == 0)
-                       t4_handle_fw_rpl(q->adap, p->data);
+#ifdef CONFIG_CHELSIO_T4_DCB
+               const struct fw_port_cmd *pcmd = (const void *)p->data;
+               unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
+               unsigned int action =
+                       FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
+
+               if (cmd == FW_PORT_CMD &&
+                   action == FW_PORT_ACTION_GET_PORT_INFO) {
+                       int port = FW_PORT_CMD_PORTID_GET(
+                                       be32_to_cpu(pcmd->op_to_portid));
+                       struct net_device *dev = q->adap->port[port];
+                       int state_input = ((pcmd->u.info.dcbxdis_pkd &
+                                           FW_PORT_CMD_DCBXDIS)
+                                          ? CXGB4_DCB_INPUT_FW_DISABLED
+                                          : CXGB4_DCB_INPUT_FW_ENABLED);
+
+                       cxgb4_dcb_state_fsm(dev, state_input);
+               }
+
+               if (cmd == FW_PORT_CMD &&
+                   action == FW_PORT_ACTION_L2_DCB_CFG)
+                       dcb_rpl(q->adap, pcmd);
+               else
+#endif
+                       if (p->type == 0)
+                               t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;
 
@@ -1290,6 +1422,48 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
        return 0;
 }
 
+static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv, select_queue_fallback_t fallback)
+{
+       int txq;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* If Data Center Bridging has been successfully negotiated on this
+        * link then we'll use the skb's priority to map it to a TX Queue.
+        * The skb's priority is determined via the VLAN Tag Priority Code
+        * Point field.
+        */
+       if (cxgb4_dcb_enabled(dev)) {
+               u16 vlan_tci;
+               int err;
+
+               err = vlan_get_tag(skb, &vlan_tci);
+               if (unlikely(err)) {
+                       if (net_ratelimit())
+                               netdev_warn(dev,
+                                           "TX Packet without VLAN Tag on DCB Link\n");
+                       txq = 0;
+               } else {
+                       txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+               }
+               return txq;
+       }
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+       if (select_queue) {
+               txq = (skb_rx_queue_recorded(skb)
+                       ? skb_get_rx_queue(skb)
+                       : smp_processor_id());
+
+               while (unlikely(txq >= dev->real_num_tx_queues))
+                       txq -= dev->real_num_tx_queues;
+
+               return txq;
+       }
+
+       return fallback(dev, skb) % dev->real_num_tx_queues;
+}
+
 static inline int is_offload(const struct adapter *adap)
 {
        return adap->params.offload;
@@ -2912,6 +3086,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
        loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct adapter *adap = file->private_data - mem;
+       __be32 *data;
+       int ret;
 
        if (pos < 0)
                return -EINVAL;
@@ -2920,29 +3096,24 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
        if (count > avail - pos)
                count = avail - pos;
 
-       while (count) {
-               size_t len;
-               int ret, ofst;
-               __be32 data[16];
+       data = t4_alloc_mem(count);
+       if (!data)
+               return -ENOMEM;
 
-               if ((mem == MEM_MC) || (mem == MEM_MC1))
-                       ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
-               else
-                       ret = t4_edc_read(adap, mem, pos, data, NULL);
-               if (ret)
-                       return ret;
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       if (ret) {
+               t4_free_mem(data);
+               return ret;
+       }
+       ret = copy_to_user(buf, data, count);
 
-               ofst = pos % sizeof(data);
-               len = min(count, sizeof(data) - ofst);
-               if (copy_to_user(buf, (u8 *)data + ofst, len))
-                       return -EFAULT;
+       t4_free_mem(data);
+       if (ret)
+               return -EFAULT;
 
-               buf += len;
-               pos += len;
-               count -= len;
-       }
-       count = pos - *ppos;
-       *ppos = pos;
+       *ppos = pos + count;
        return count;
 }
 
@@ -3274,8 +3445,8 @@ static int tid_init(struct tid_info *t)
        return 0;
 }
 
-static int cxgb4_clip_get(const struct net_device *dev,
-                         const struct in6_addr *lip)
+int cxgb4_clip_get(const struct net_device *dev,
+                  const struct in6_addr *lip)
 {
        struct adapter *adap;
        struct fw_clip_cmd c;
@@ -3289,9 +3460,10 @@ static int cxgb4_clip_get(const struct net_device *dev,
        c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
+EXPORT_SYMBOL(cxgb4_clip_get);
 
-static int cxgb4_clip_release(const struct net_device *dev,
-                             const struct in6_addr *lip)
+int cxgb4_clip_release(const struct net_device *dev,
+                      const struct in6_addr *lip)
 {
        struct adapter *adap;
        struct fw_clip_cmd c;
@@ -3305,6 +3477,7 @@ static int cxgb4_clip_release(const struct net_device *dev,
        c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
+EXPORT_SYMBOL(cxgb4_clip_release);
 
 /**
  *     cxgb4_create_server - create an IP server
@@ -3603,7 +3776,11 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
        __be64 indices;
        int ret;
 
-       ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
+                          sizeof(indices), (__be32 *)&indices,
+                          T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
        if (!ret) {
                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
@@ -3657,6 +3834,85 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
 
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
+{
+       struct adapter *adap;
+       u32 offset, memtype, memaddr;
+       u32 edc0_size, edc1_size, mc0_size, mc1_size;
+       u32 edc0_end, edc1_end, mc0_end, mc1_end;
+       int ret;
+
+       adap = netdev2adap(dev);
+
+       offset = ((stag >> 8) * 32) + adap->vres.stag.start;
+
+       /* Figure out where the offset lands in the Memory Type/Address scheme.
+        * This code assumes that the memory is laid out starting at offset 0
+        * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
+        * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
+        * MC0, and some have both MC0 and MC1.
+        */
+       edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
+       edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
+       mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
+
+       edc0_end = edc0_size;
+       edc1_end = edc0_end + edc1_size;
+       mc0_end = edc1_end + mc0_size;
+
+       if (offset < edc0_end) {
+               memtype = MEM_EDC0;
+               memaddr = offset;
+       } else if (offset < edc1_end) {
+               memtype = MEM_EDC1;
+               memaddr = offset - edc0_end;
+       } else {
+               if (offset < mc0_end) {
+                       memtype = MEM_MC0;
+                       memaddr = offset - edc1_end;
+               } else if (is_t4(adap->params.chip)) {
+                       /* T4 only has a single memory channel */
+                       goto err;
+               } else {
+                       mc1_size = EXT_MEM_SIZE_GET(
+                                       t4_read_reg(adap,
+                                                   MA_EXT_MEMORY1_BAR)) << 20;
+                       mc1_end = mc0_end + mc1_size;
+                       if (offset < mc1_end) {
+                               memtype = MEM_MC1;
+                               memaddr = offset - mc0_end;
+                       } else {
+                               /* offset beyond the end of any memory */
+                               goto err;
+                       }
+               }
+       }
+
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       return ret;
+
+err:
+       dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
+               stag, offset);
+       return -EINVAL;
+}
+EXPORT_SYMBOL(cxgb4_read_tpte);
+
+u64 cxgb4_read_sge_timestamp(struct net_device *dev)
+{
+       u32 hi, lo;
+       struct adapter *adap;
+
+       adap = netdev2adap(dev);
+       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
+       hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+
+       return ((u64)hi << 32) | (u64)lo;
+}
+EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+
 static struct pci_driver cxgb4_driver;
 
 static void check_neigh_update(struct neighbour *neigh)
@@ -3899,6 +4155,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        unsigned short i;
 
        lli.pdev = adap->pdev;
+       lli.pf = adap->fn;
        lli.l2t = adap->l2t;
        lli.tids = &adap->tids;
        lli.ports = adap->port;
@@ -3919,6 +4176,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.chip;
        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+       lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
@@ -3933,8 +4191,12 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
        lli.fw_vers = adap->params.fw_vers;
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
+       lli.sge_ingpadboundary = adap->sge.fl_align;
+       lli.sge_egrstatuspagesize = adap->sge.stat_len;
        lli.sge_pktshift = adap->sge.pktshift;
        lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+       lli.max_ordird_qp = adap->params.max_ordird_qp;
+       lli.max_ird_adapter = adap->params.max_ird_adapter;
        lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 
        handle = ulds[uld].add(&lli);
@@ -4598,6 +4860,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_open             = cxgb_open,
        .ndo_stop             = cxgb_close,
        .ndo_start_xmit       = t4_eth_xmit,
+       .ndo_select_queue     = cxgb_select_queue,
        .ndo_get_stats64      = cxgb_get_stats,
        .ndo_set_rx_mode      = cxgb_set_rxmode,
        .ndo_set_mac_address  = cxgb_set_mac_addr,
@@ -4617,20 +4880,75 @@ void t4_fatal_err(struct adapter *adap)
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
 
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+       struct fw_ldst_cmd ldst_cmd;
+       u32 val;
+       int ret;
+
+       /* Construct and send the Firmware LDST Command to retrieve the
+        * specified PCI-E Configuration Space register.
+        */
+       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+       ldst_cmd.op_to_addrspace =
+               htonl(FW_CMD_OP(FW_LDST_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ |
+                     FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+       ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+       ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+       ldst_cmd.u.pcie.ctrl_to_fn =
+               (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
+       ldst_cmd.u.pcie.r = reg;
+       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+                        &ldst_cmd);
+
+       /* If the LDST Command succeeded, extract the returned register
+        * value.  Otherwise read it directly ourself.
+        */
+       if (ret == 0)
+               val = ntohl(ldst_cmd.u.pcie.data[0]);
+       else
+               t4_hw_pci_read_cfg4(adap, reg, &val);
+
+       return val;
+}
+
 static void setup_memwin(struct adapter *adap)
 {
-       u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
+       u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
 
-       bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
        if (is_t4(adap->params.chip)) {
+               u32 bar0;
+
+               /* Truncation intentional: we only read the bottom 32-bits of
+                * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
+                * mechanism to read BAR0 instead of using
+                * pci_resource_start() because we could be operating from
+                * within a Virtual Machine which is trapping our accesses to
+                * our Configuration Space and we need to set up the PCI-E
+                * Memory Window decoders with the actual addresses which will
+                * be coming across the PCI-E link.
+                */
+               bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
+               bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+               adap->t4_bar0 = bar0;
+
                mem_win0_base = bar0 + MEMWIN0_BASE;
                mem_win1_base = bar0 + MEMWIN1_BASE;
                mem_win2_base = bar0 + MEMWIN2_BASE;
+               mem_win2_aperture = MEMWIN2_APERTURE;
        } else {
                /* For T5, only relative offset inside the PCIe BAR is passed */
                mem_win0_base = MEMWIN0_BASE;
-               mem_win1_base = MEMWIN1_BASE_T5;
+               mem_win1_base = MEMWIN1_BASE;
                mem_win2_base = MEMWIN2_BASE_T5;
+               mem_win2_aperture = MEMWIN2_APERTURE_T5;
        }
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
                     mem_win0_base | BIR(0) |
@@ -4640,16 +4958,19 @@ static void setup_memwin(struct adapter *adap)
                     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     mem_win2_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+                    WINDOW(ilog2(mem_win2_aperture) - 10));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
 {
        if (adap->vres.ocq.size) {
-               unsigned int start, sz_kb;
+               u32 start;
+               unsigned int sz_kb;
 
-               start = pci_resource_start(adap->pdev, 2) +
-                       OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+               start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
+               start &= PCI_BASE_ADDRESS_MEM_MASK;
+               start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
@@ -4862,7 +5183,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                              adapter->fn, 0, 1, params, val);
                        if (ret == 0) {
                                /*
-                                * For t4_memory_write() below addresses and
+                                * For t4_memory_rw() below addresses and
                                 * sizes have to be in terms of multiples of 4
                                 * bytes.  So, if the Configuration File isn't
                                 * a multiple of 4 bytes in length we'll have
@@ -4878,8 +5199,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
                                maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
 
-                               ret = t4_memory_write(adapter, mtype, maddr,
-                                                     size, data);
+                               spin_lock(&adapter->win0_lock);
+                               ret = t4_memory_rw(adapter, 0, mtype, maddr,
+                                                  size, data, T4_MEMORY_WRITE);
                                if (ret == 0 && resid != 0) {
                                        union {
                                                __be32 word;
@@ -4890,10 +5212,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                        last.word = data[size >> 2];
                                        for (i = resid; i < 4; i++)
                                                last.buf[i] = 0;
-                                       ret = t4_memory_write(adapter, mtype,
-                                                             maddr + size,
-                                                             4, &last.word);
+                                       ret = t4_memory_rw(adapter, 0, mtype,
+                                                          maddr + size,
+                                                          4, &last.word,
+                                                          T4_MEMORY_WRITE);
                                }
+                               spin_unlock(&adapter->win0_lock);
                        }
                }
 
@@ -5637,6 +5961,22 @@ static int adap_init0(struct adapter *adap)
                adap->vres.cq.size = val[3] - val[2] + 1;
                adap->vres.ocq.start = val[4];
                adap->vres.ocq.size = val[5] - val[4] + 1;
+
+               params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
+               params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               if (ret < 0) {
+                       adap->params.max_ordird_qp = 8;
+                       adap->params.max_ird_adapter = 32 * adap->tids.ntids;
+                       ret = 0;
+               } else {
+                       adap->params.max_ordird_qp = val[0];
+                       adap->params.max_ird_adapter = val[1];
+               }
+               dev_info(adap->pdev_dev,
+                        "max_ordird_qp %d max_ird_adapter %d\n",
+                        adap->params.max_ordird_qp,
+                        adap->params.max_ird_adapter);
        }
        if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -5838,12 +6178,33 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 static void cfg_queues(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       int i, q10g = 0, n10g = 0, qidx = 0;
+       int i, n10g = 0, qidx = 0;
+#ifndef CONFIG_CHELSIO_T4_DCB
+       int q10g = 0;
+#endif
        int ciq_size;
 
        for_each_port(adap, i)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* For Data Center Bridging support we need to be able to support up
+        * to 8 Traffic Priorities; each of which will be assigned to its
+        * own TX Queue in order to prevent Head-Of-Line Blocking.
+        */
+       if (adap->params.nports * 8 > MAX_ETH_QSETS) {
+               dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
+                       MAX_ETH_QSETS, adap->params.nports * 8);
+               BUG_ON(1);
+       }
 
+       for_each_port(adap, i) {
+               struct port_info *pi = adap2pinfo(adap, i);
+
+               pi->first_qset = qidx;
+               pi->nqsets = 8;
+               qidx += pi->nqsets;
+       }
+#else /* !CONFIG_CHELSIO_T4_DCB */
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
         * per 10G port.
@@ -5860,6 +6221,7 @@ static void cfg_queues(struct adapter *adap)
                pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
                qidx += pi->nqsets;
        }
+#endif /* !CONFIG_CHELSIO_T4_DCB */
 
        s->ethqsets = qidx;
        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5978,8 +6340,14 @@ static int enable_msix(struct adapter *adap)
                /* need nchan for each possible ULD */
                ofld_need = 3 * nchan;
        }
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
+        * each port.
+        */
+       need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+#else
        need = adap->params.nports + EXTRA_VECS + ofld_need;
-
+#endif
        want = pci_enable_msix_range(adap->pdev, entries, need, want);
        if (want < 0)
                return want;
@@ -6111,13 +6479,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
        }
 
-       /* We control everything through one PF */
-       func = PCI_FUNC(pdev->devfn);
-       if (func != ent->driver_data) {
-               pci_save_state(pdev);        /* to restore SR-IOV later */
-               goto sriov;
-       }
-
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
@@ -6161,6 +6522,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_adapter;
        }
 
+       /* We control everything through one PF */
+       func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
+       if ((pdev->device == 0xa000 && func != 0) ||
+           func != ent->driver_data) {
+               pci_save_state(pdev);        /* to restore SR-IOV later */
+               err = 0;
+               goto out_unmap_bar0;
+       }
+
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
@@ -6242,6 +6612,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4_netdev_ops;
+#ifdef CONFIG_CHELSIO_T4_DCB
+               netdev->dcbnl_ops = &cxgb4_dcb_ops;
+               cxgb4_dcb_state_init(netdev);
+#endif
                netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
@@ -6320,7 +6694,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (is_offload(adapter))
                attach_ulds(adapter);
 
-sriov:
 #ifdef CONFIG_PCI_IOV
        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
                if (pci_enable_sriov(pdev, num_vf[func]) == 0)
@@ -6366,8 +6739,7 @@ static void remove_one(struct pci_dev *pdev)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);
 
-               if (adapter->debugfs_root)
-                       debugfs_remove_recursive(adapter->debugfs_root);
+               debugfs_remove_recursive(adapter->debugfs_root);
 
                /* If we allocated filters, free up state associated with any
                 * valid filters ...
index 55e9daf7f9d47f95ab19a83b363637b4c549990f..1366ba620c87cdf506e1bd0757d7062cd5bf605e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -172,6 +172,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned char port, unsigned char mask);
 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned int queue, bool ipv6);
+int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
+int cxgb4_clip_release(const struct net_device *dev,
+                      const struct in6_addr *lip);
+
 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 {
        skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -243,6 +247,7 @@ struct cxgb4_lld_info {
        unsigned char fw_api_ver;            /* FW API version */
        unsigned int fw_vers;                /* FW version */
        unsigned int iscsi_iolen;            /* iSCSI max I/O length */
+       unsigned int cclk_ps;                /* Core clock period in psec */
        unsigned short udb_density;          /* # of user DB/page */
        unsigned short ucq_density;          /* # of user CQs/page */
        unsigned short filt_mode;            /* filter optional components */
@@ -251,10 +256,15 @@ struct cxgb4_lld_info {
        void __iomem *gts_reg;               /* address of GTS register */
        void __iomem *db_reg;                /* address of kernel doorbell */
        int dbfifo_int_thresh;               /* doorbell fifo int threshold */
+       unsigned int sge_ingpadboundary;     /* SGE ingress padding boundary */
+       unsigned int sge_egrstatuspagesize;  /* SGE egress status page size */
        unsigned int sge_pktshift;           /* Padding between CPL and */
                                             /* packet data */
+       unsigned int pf;                     /* Physical Function we're using */
        bool enable_fw_ofld_conn;            /* Enable connection through fw */
                                             /* WR */
+       unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
+       unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
        bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };
 
@@ -291,5 +301,7 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
 int cxgb4_flush_eq_cache(struct net_device *dev);
 void cxgb4_disable_db_coalescing(struct net_device *dev);
 void cxgb4_enable_db_coalescing(struct net_device *dev);
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
+u64 cxgb4_read_sge_timestamp(struct net_device *dev);
 
 #endif  /* !__CXGB4_OFLD_H */
index 8a96572fdde0abd54a1e32b7914b9c90da65a3b5..96041397ee15e657e23851be888f6e0f317ee46f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 85eb5c71358d80ad91e21df648289047165dbf39..a30126ce90cbabeaf50d7ed3fc5f1531c3831db9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index dd4355d248e4c4134313b637154ced4efdf3c96f..8bae1aa744a723e993e2b6cb1b41a842cfd39a2e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 931478e7bd284e9b21791ada6a172ea2f9c00e40..e76885236e9d70adb6eabdc399b90cb67638af3d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -143,6 +143,30 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
        }
 }
 
+/*
+ * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
+ * mechanism.  This guarantees that we get the real value even if we're
+ * operating within a Virtual Machine and the Hypervisor is trapping our
+ * Configuration Space accesses.
+ */
+void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
+{
+       u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+
+       if (is_t4(adap->params.chip))
+               req |= F_LOCALCFG;
+
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
+       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+
+       /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
+        * Configuration Space read.  (None of the other fields matter when
+        * ENABLE is 0 so a simple register write is easier than a
+        * read-modify-write via t4_set_reg_field().)
+        */
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
@@ -389,78 +413,41 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        return 0;
 }
 
-/*
- *     t4_mem_win_rw - read/write memory through PCIE memory window
- *     @adap: the adapter
- *     @addr: address of first byte requested
- *     @data: MEMWIN0_APERTURE bytes of data containing the requested address
- *     @dir: direction of transfer 1 => read, 0 => write
- *
- *     Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
- *     MEMWIN0_APERTURE-byte-aligned address that covers the requested
- *     address @addr.
- */
-static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
-{
-       int i;
-       u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
-
-       /*
-        * Setup offset into PCIE memory window.  Address must be a
-        * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
-        * ensure that changes propagate before we attempt to use the new
-        * values.)
-        */
-       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
-                    (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
-       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
-
-       /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
-       for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
-               if (dir)
-                       *data++ = (__force __be32) t4_read_reg(adap,
-                                                       (MEMWIN0_BASE + i));
-               else
-                       t4_write_reg(adap, (MEMWIN0_BASE + i),
-                                    (__force u32) *data++);
-       }
-
-       return 0;
-}
-
 /**
  *     t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  *     @adap: the adapter
+ *     @win: PCI-E Memory Window to use
  *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *     @addr: address within indicated memory type
  *     @len: amount of memory to transfer
  *     @buf: host memory buffer
- *     @dir: direction of transfer 1 => read, 0 => write
+ *     @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *     Reads/writes an [almost] arbitrary memory region in the firmware: the
- *     firmware memory address, length and host buffer must be aligned on
- *     32-bit boudaries.  The memory is transferred as a raw byte sequence
- *     from/to the firmware's memory.  If this memory contains data
- *     structures which contain multi-byte integers, it's the callers
- *     responsibility to perform appropriate byte order conversions.
+ *     firmware memory address and host buffer must be aligned on 32-bit
+ *     boudaries; the length may be arbitrary.  The memory is transferred as
+ *     a raw byte sequence from/to the firmware's memory.  If this memory
+ *     contains data structures which contain multi-byte integers, it's the
+ *     caller's responsibility to perform appropriate byte order conversions.
  */
-static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
-                       __be32 *buf, int dir)
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
+                u32 len, __be32 *buf, int dir)
 {
-       u32 pos, start, end, offset, memoffset;
-       u32 edc_size, mc_size;
-       int ret = 0;
-       __be32 *data;
+       u32 pos, offset, resid, memoffset;
+       u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
 
-       /*
-        * Argument sanity checks ...
+       /* Argument sanity checks ...
         */
-       if ((addr & 0x3) || (len & 0x3))
+       if (addr & 0x3)
                return -EINVAL;
 
-       data = vmalloc(MEMWIN0_APERTURE);
-       if (!data)
-               return -ENOMEM;
+       /* It's convenient to be able to handle lengths which aren't a
+        * multiple of 32-bits because we often end up transferring files to
+        * the firmware.  So we'll handle that by normalizing the length here
+        * and then handling any residual transfer at the end.
+        */
+       resid = len & 0x3;
+       len -= resid;
 
        /* Offset into the region of memory which is being accessed
         * MEM_EDC0 = 0
@@ -481,66 +468,98 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
        /* Determine the PCIE_MEM_ACCESS_OFFSET */
        addr = addr + memoffset;
 
-       /*
-        * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
-        * at a time so we need to round down the start and round up the end.
-        * We'll start copying out of the first line at (addr - start) a word
-        * at a time.
+       /* Each PCI-E Memory Window is programmed with a window size -- or
+        * "aperture" -- which controls the granularity of its mapping onto
+        * adapter memory.  We need to grab that aperture in order to know
+        * how to use the specified window.  The window is also programmed
+        * with the base address of the Memory Window in BAR0's address
+        * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
+        * the address is relative to BAR0.
         */
-       start = addr & ~(MEMWIN0_APERTURE-1);
-       end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
-       offset = (addr - start)/sizeof(__be32);
+       mem_reg = t4_read_reg(adap,
+                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+                                                 win));
+       mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
+       mem_base = GET_PCIEOFST(mem_reg) << 10;
+       if (is_t4(adap->params.chip))
+               mem_base -= adap->t4_bar0;
+       win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
 
-       for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+       /* Calculate our initial PCI-E Memory Window Position and Offset into
+        * that Window.
+        */
+       pos = addr & ~(mem_aperture-1);
+       offset = addr - pos;
 
-               /*
-                * If we're writing, copy the data from the caller's memory
-                * buffer
+       /* Set up initial PCI-E Memory Window to cover the start of our
+        * transfer.  (Read it back to ensure that changes propagate before we
+        * attempt to use the new value.)
+        */
+       t4_write_reg(adap,
+                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+                    pos | win_pf);
+       t4_read_reg(adap,
+                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+       /* Transfer data to/from the adapter as long as there's an integral
+        * number of 32-bit transfers to complete.
+        */
+       while (len > 0) {
+               if (dir == T4_MEMORY_READ)
+                       *buf++ = (__force __be32) t4_read_reg(adap,
+                                                       mem_base + offset);
+               else
+                       t4_write_reg(adap, mem_base + offset,
+                                    (__force u32) *buf++);
+               offset += sizeof(__be32);
+               len -= sizeof(__be32);
+
+               /* If we've reached the end of our current window aperture,
+                * move the PCI-E Memory Window on to the next.  Note that
+                * doing this here after "len" may be 0 allows us to set up
+                * the PCI-E Memory Window for a possible final residual
+                * transfer below ...
                 */
-               if (!dir) {
-                       /*
-                        * If we're doing a partial write, then we need to do
-                        * a read-modify-write ...
-                        */
-                       if (offset || len < MEMWIN0_APERTURE) {
-                               ret = t4_mem_win_rw(adap, pos, data, 1);
-                               if (ret)
-                                       break;
-                       }
-                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
-                              len > 0) {
-                               data[offset++] = *buf++;
-                               len -= sizeof(__be32);
-                       }
+               if (offset == mem_aperture) {
+                       pos += mem_aperture;
+                       offset = 0;
+                       t4_write_reg(adap,
+                                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
+                                                        win), pos | win_pf);
+                       t4_read_reg(adap,
+                                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
+                                                       win));
                }
-
-               /*
-                * Transfer a block of memory and bail if there's an error.
-                */
-               ret = t4_mem_win_rw(adap, pos, data, dir);
-               if (ret)
-                       break;
-
-               /*
-                * If we're reading, copy the data into the caller's memory
-                * buffer.
-                */
-               if (dir)
-                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
-                              len > 0) {
-                               *buf++ = data[offset++];
-                               len -= sizeof(__be32);
-                       }
        }
 
-       vfree(data);
-       return ret;
-}
+       /* If the original transfer had a length which wasn't a multiple of
+        * 32-bits, now's where we need to finish off the transfer of the
+        * residual amount.  The PCI-E Memory Window has already been moved
+        * above (if necessary) to cover this final transfer.
+        */
+       if (resid) {
+               union {
+                       __be32 word;
+                       char byte[4];
+               } last;
+               unsigned char *bp;
+               int i;
+
+               if (dir == T4_MEMORY_WRITE) {
+                       last.word = (__force __be32) t4_read_reg(adap,
+                                                       mem_base + offset);
+                       for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+                               bp[i] = last.byte[i];
+               } else {
+                       last.word = *buf;
+                       for (i = resid; i < 4; i++)
+                               last.byte[i] = 0;
+                       t4_write_reg(adap, mem_base + offset,
+                                    (__force u32) last.word);
+               }
+       }
 
-int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
-                   __be32 *buf)
-{
-       return t4_memory_rw(adap, mtype, addr, len, buf, 0);
+       return 0;
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
@@ -1700,16 +1719,24 @@ static void mps_intr_handler(struct adapter *adapter)
  */
 static void mem_intr_handler(struct adapter *adapter, int idx)
 {
-       static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+       static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
 
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
                addr = EDC_REG(EDC_INT_CAUSE, idx);
                cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+       } else if (idx == MEM_MC) {
+               if (is_t4(adapter->params.chip)) {
+                       addr = MC_INT_CAUSE;
+                       cnt_addr = MC_ECC_STATUS;
+               } else {
+                       addr = MC_P_INT_CAUSE;
+                       cnt_addr = MC_P_ECC_STATUS;
+               }
        } else {
-               addr = MC_INT_CAUSE;
-               cnt_addr = MC_ECC_STATUS;
+               addr = MC_REG(MC_P_INT_CAUSE, 1);
+               cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
        }
 
        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
@@ -1873,6 +1900,8 @@ int t4_slow_intr_handler(struct adapter *adapter)
                pcie_intr_handler(adapter);
        if (cause & MC)
                mem_intr_handler(adapter, MEM_MC);
+       if (!is_t4(adapter->params.chip) && (cause & MC1))
+               mem_intr_handler(adapter, MEM_MC1);
        if (cause & EDC0)
                mem_intr_handler(adapter, MEM_EDC0);
        if (cause & EDC1)
@@ -2504,39 +2533,6 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
-/**
- *     t4_mem_win_read_len - read memory through PCIE memory window
- *     @adap: the adapter
- *     @addr: address of first byte requested aligned on 32b.
- *     @data: len bytes to hold the data read
- *     @len: amount of data to read from window.  Must be <=
- *            MEMWIN0_APERATURE after adjusting for 16B for T4 and
- *            128B for T5 alignment requirements of the the memory window.
- *
- *     Read len bytes of data from MC starting at @addr.
- */
-int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
-{
-       int i, off;
-       u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
-
-       /* Align on a 2KB boundary.
-        */
-       off = addr & MEMWIN0_APERTURE;
-       if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
-               return -EINVAL;
-
-       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
-                    (addr & ~MEMWIN0_APERTURE) | win_pf);
-       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
-
-       for (i = 0; i < len; i += 4)
-               *data++ = (__force __be32) t4_read_reg(adap,
-                                               (MEMWIN0_BASE + off + i));
-
-       return 0;
-}
-
 /**
  *     t4_mdio_rd - read a PHY register through MDIO
  *     @adap: the adapter
@@ -3174,6 +3170,46 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
        return ret;
 }
 
+/**
+ *      t4_set_params_nosleep - sets FW or device parameters
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @pf: the PF
+ *      @vf: the VF
+ *      @nparams: the number of parameters
+ *      @params: the parameter names
+ *      @val: the parameter values
+ *
+ *      Does not ever sleep
+ *      Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *      specified at once.
+ */
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+                         unsigned int pf, unsigned int vf,
+                         unsigned int nparams, const u32 *params,
+                         const u32 *val)
+{
+       struct fw_params_cmd c;
+       __be32 *p = &c.param[0].mnem;
+
+       if (nparams > 7)
+               return -EINVAL;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+                               FW_CMD_REQUEST | FW_CMD_WRITE |
+                               FW_PARAMS_CMD_PFN(pf) |
+                               FW_PARAMS_CMD_VFN(vf));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+       while (nparams--) {
+               *p++ = cpu_to_be32(*params++);
+               *p++ = cpu_to_be32(*val++);
+       }
+
+       return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_set_params - sets FW or device parameters
  *     @adap: the adapter
@@ -3498,6 +3534,33 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
+/**
+ *      t4_enable_vi_params - enable/disable a virtual interface
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @viid: the VI id
+ *      @rx_en: 1=enable Rx, 0=disable Rx
+ *      @tx_en: 1=enable Tx, 0=disable Tx
+ *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ *      Enables/disables a virtual interface.  Note that setting DCB Enable
+ *      only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+                       unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+       struct fw_vi_enable_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+                            FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+
+       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+                              FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
+                              FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_enable_vi - enable/disable a virtual interface
  *     @adap: the adapter
@@ -3511,14 +3574,7 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
 {
-       struct fw_vi_enable_cmd c;
-
-       memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
-                            FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
-       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
-                              FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
-       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+       return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
 }
 
 /**
index 71b799b5b0f499d050237c82788903b87dbb63bb..35e3d8e3288162485d442a16e0f4435c5109d998 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 973eb11aa98a06dbcc66f343c96063954ae9b90a..0259feeab1b3a70b4f09f9011c01625b36db6e70 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -270,12 +270,15 @@ struct cpl_pass_accept_rpl {
 #define RX_COALESCE_VALID(x) ((x) << 11)
 #define RX_COALESCE(x)       ((x) << 12)
 #define PACE(x)              ((x) << 16)
+#define RX_FC_VALID         ((1U) << 19)
+#define RX_FC_DISABLE       ((1U) << 20)
 #define TX_QUEUE(x)          ((x) << 23)
 #define RX_CHANNEL(x)        ((x) << 26)
 #define CCTRL_ECN(x)         ((x) << 27)
 #define WND_SCALE_EN(x)      ((x) << 28)
 #define TSTAMPS_EN(x)        ((x) << 29)
 #define SACK_EN(x)           ((x) << 30)
+#define T5_OPT_2_VALID      ((1U) << 31)
        __be64 opt0;
 };
 
index 225ad8a5722de026bf0302b4f4f49886100e66f5..e3146e83df2043ae59436e7eff6b8c276a3fb3e4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
 #define F_NOCOALESCE    V_NOCOALESCE(1U)
 
+#define SGE_TIMESTAMP_LO 0x1098
+#define SGE_TIMESTAMP_HI 0x109c
+#define S_TSVAL    0
+#define M_TSVAL    0xfffffffU
+#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
+
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define  TIMERVALUE0_MASK   0xffff0000U
 #define  TIMERVALUE0_SHIFT  16
 #define  MSTGRPPERR      0x00000001U
 
 #define PCIE_NONFAT_ERR 0x3010
+#define PCIE_CFG_SPACE_REQ 0x3060
+#define PCIE_CFG_SPACE_DATA 0x3064
 #define PCIE_MEM_ACCESS_BASE_WIN 0x3068
 #define S_PCIEOFST       10
 #define M_PCIEOFST       0x3fffffU
 #define  WINDOW_MASK     0x000000ffU
 #define  WINDOW_SHIFT    0
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
+#define  GET_WINDOW(x)  (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
 #define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define ENABLE (1U << 30)
+#define FUNCTION(x) ((x) << 12)
+#define F_LOCALCFG    (1U << 28)
 
 #define S_PFNUM    0
 #define V_PFNUM(x) ((x) << S_PFNUM)
 #define  TDUE 0x00010000U
 
 #define MC_INT_CAUSE 0x7518
+#define MC_P_INT_CAUSE 0x41318
 #define  ECC_UE_INT_CAUSE 0x00000004U
 #define  ECC_CE_INT_CAUSE 0x00000002U
 #define  PERR_INT_CAUSE   0x00000001U
 
 #define MC_ECC_STATUS 0x751c
+#define MC_P_ECC_STATUS 0x4131c
 #define  ECC_CECNT_MASK   0xffff0000U
 #define  ECC_CECNT_SHIFT  16
 #define  ECC_CECNT(x)     ((x) << ECC_CECNT_SHIFT)
 #define  I2CM       0x00000002U
 #define  CIM        0x00000001U
 
+#define MC1 0x31
 #define PL_INT_ENABLE 0x19410
 #define PL_INT_MAP0 0x19414
 #define PL_RST 0x19428
index 9cc973fbcf26761b322810fa91dcf61f528b3fc7..ff709e3b3e7e43371518222d93a5119dcd3b185d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -46,9 +46,11 @@ enum fw_retval {
        FW_EFAULT               = 14,   /* bad address; fw bad */
        FW_EBUSY                = 16,   /* resource busy */
        FW_EEXIST               = 17,   /* file exists */
+       FW_ENODEV               = 19,   /* no such device */
        FW_EINVAL               = 22,   /* invalid argument */
        FW_ENOSPC               = 28,   /* no space left on device */
        FW_ENOSYS               = 38,   /* functionality not implemented */
+       FW_ENODATA              = 61,   /* no data available */
        FW_EPROTO               = 71,   /* protocol error */
        FW_EADDRINUSE           = 98,   /* address already in use */
        FW_EADDRNOTAVAIL        = 99,   /* cannot assigned requested address */
@@ -932,6 +934,8 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
        FW_PARAMS_PARAM_DEV_CF = 0x0D,
+       FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+       FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
        FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
 };
 
@@ -989,6 +993,7 @@ enum fw_params_param_dmaq {
        FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
        FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
        FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+       FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
 };
 
 #define FW_PARAMS_MNEM(x)      ((x) << 24)
@@ -1422,6 +1427,7 @@ struct fw_vi_enable_cmd {
 #define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
 #define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
 #define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
+#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28)
 #define FW_VI_ENABLE_CMD_LED (1U << 29)
 
 /* VI VF stats offset definitions */
@@ -1594,6 +1600,9 @@ enum fw_port_action {
        FW_PORT_ACTION_GET_PORT_INFO    = 0x0003,
        FW_PORT_ACTION_L2_PPP_CFG       = 0x0004,
        FW_PORT_ACTION_L2_DCB_CFG       = 0x0005,
+       FW_PORT_ACTION_DCB_READ_TRANS   = 0x0006,
+       FW_PORT_ACTION_DCB_READ_RECV    = 0x0007,
+       FW_PORT_ACTION_DCB_READ_DET     = 0x0008,
        FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
        FW_PORT_ACTION_L1_LOW_PWR_EN    = 0x0011,
        FW_PORT_ACTION_L2_WOL_MODE_EN   = 0x0012,
@@ -1637,6 +1646,14 @@ enum fw_port_dcb_type {
        FW_PORT_DCB_TYPE_PRIORATE       = 0x02,
        FW_PORT_DCB_TYPE_PFC            = 0x03,
        FW_PORT_DCB_TYPE_APP_ID         = 0x04,
+       FW_PORT_DCB_TYPE_CONTROL        = 0x05,
+};
+
+enum fw_port_dcb_feature_state {
+       FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0,
+       FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1,
+       FW_PORT_DCB_FEATURE_STATE_ERROR = 0x2,
+       FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3,
 };
 
 struct fw_port_cmd {
@@ -1648,9 +1665,11 @@ struct fw_port_cmd {
                        __be32 r;
                } l1cfg;
                struct fw_port_l2cfg {
-                       __be16 ctlbf_to_ivlan0;
+                       __u8   ctlbf;
+                       __u8   ovlan3_to_ivlan0;
                        __be16 ivlantype;
-                       __be32 txipg_pkd;
+                       __be16 txipg_force_pinfo;
+                       __be16 mtu;
                        __be16 ovlan0mask;
                        __be16 ovlan0type;
                        __be16 ovlan1mask;
@@ -1666,24 +1685,60 @@ struct fw_port_cmd {
                        __be16 acap;
                        __be16 mtu;
                        __u8   cbllen;
-                       __u8   r9;
-                       __be32 r10;
-                       __be64 r11;
+                       __u8   auxlinfo;
+                       __u8   dcbxdis_pkd;
+                       __u8   r8_lo[3];
+                       __be64 r9;
                } info;
-               struct fw_port_ppp {
-                       __be32 pppen_to_ncsich;
-                       __be32 r11;
-               } ppp;
-               struct fw_port_dcb {
-                       __be16 cfg;
-                       u8 up_map;
-                       u8 sf_cfgrc;
-                       __be16 prot_ix;
-                       u8 pe7_to_pe0;
-                       u8 numTCPFCs;
-                       __be32 pgid0_to_pgid7;
-                       __be32 numTCs_oui;
-                       u8 pgpc[8];
+               struct fw_port_diags {
+                       __u8   diagop;
+                       __u8   r[3];
+                       __be32 diagval;
+               } diags;
+               union fw_port_dcb {
+                       struct fw_port_dcb_pgid {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[2];
+                               __be32 pgid;
+                               __be64 r11;
+                       } pgid;
+                       struct fw_port_dcb_pgrate {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[5];
+                               __u8   num_tcs_supported;
+                               __u8   pgrate[8];
+                       } pgrate;
+                       struct fw_port_dcb_priorate {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[6];
+                               __u8   strict_priorate[8];
+                       } priorate;
+                       struct fw_port_dcb_pfc {
+                               __u8   type;
+                               __u8   pfcen;
+                               __u8   r10[5];
+                               __u8   max_pfc_tcs;
+                               __be64 r11;
+                       } pfc;
+                       struct fw_port_app_priority {
+                               __u8   type;
+                               __u8   r10[2];
+                               __u8   idx;
+                               __u8   user_prio_map;
+                               __u8   sel_field;
+                               __be16 protocolid;
+                               __be64 r12;
+                       } app_priority;
+                       struct fw_port_dcb_control {
+                               __u8   type;
+                               __u8   all_syncd_pkd;
+                               __be16 pfc_state_to_app_state;
+                               __be32 r11;
+                               __be64 r12;
+                       } control;
                } dcb;
        } u;
 };
@@ -1720,6 +1775,10 @@ struct fw_port_cmd {
 #define FW_PORT_CMD_MODTYPE_MASK 0x1f
 #define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
 
+#define FW_PORT_CMD_DCBXDIS (1U << 7)
+#define FW_PORT_CMD_APPLY (1U <<  7)
+#define FW_PORT_CMD_ALL_SYNCD (1U << 7)
+
 #define FW_PORT_CMD_PPPEN(x) ((x) << 31)
 #define FW_PORT_CMD_TPSRC(x) ((x) << 28)
 #define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
index ff1cdd1788b5f62efdf03ffd2f501b65054383a9..f002af190a65b9c480bed607459fbf9aae3fac42 100644 (file)
@@ -2924,6 +2924,15 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
        CH_DEVICE(0x480a, 0),   /* T404-bt */
        CH_DEVICE(0x480d, 0),   /* T480-cr */
        CH_DEVICE(0x480e, 0),   /* T440-lp-cr */
+       CH_DEVICE(0x4880, 0),
+       CH_DEVICE(0x4880, 1),
+       CH_DEVICE(0x4880, 2),
+       CH_DEVICE(0x4880, 3),
+       CH_DEVICE(0x4880, 4),
+       CH_DEVICE(0x4880, 5),
+       CH_DEVICE(0x4880, 6),
+       CH_DEVICE(0x4880, 7),
+       CH_DEVICE(0x4880, 8),
        CH_DEVICE(0x5800, 0),   /* T580-dbg */
        CH_DEVICE(0x5801, 0),   /* T520-cr */
        CH_DEVICE(0x5802, 0),   /* T522-cr */
index 239e1e46545de438a2482c5c0a721de9800335cc..aadcaf7876ceff9ee8cc0b3c57d58630b261b62f 100644 (file)
@@ -2,5 +2,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
        enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-       enic_ethtool.o enic_api.o
+       enic_ethtool.o enic_api.o enic_clsf.o
 
index 14f465f239d65d8c791618c3961b03cd6ef79fed..962510f391dfcb3b8e041dd5ad89391b093c7bb4 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.50"
+#define DRV_VERSION            "2.1.1.67"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
@@ -99,6 +99,41 @@ struct enic_port_profile {
        u8 mac_addr[ETH_ALEN];
 };
 
+/* enic_rfs_fltr_node - rfs filter node in hash table
+ *     @@keys: IPv4 5 tuple
+ *     @flow_id: flow_id of clsf filter provided by kernel
+ *     @fltr_id: filter id of clsf filter returned by adaptor
+ *     @rq_id: desired rq index
+ *     @node: hlist_node
+ */
+struct enic_rfs_fltr_node {
+       struct flow_keys keys;
+       u32 flow_id;
+       u16 fltr_id;
+       u16 rq_id;
+       struct hlist_node node;
+};
+
+/* enic_rfs_flw_tbl - rfs flow table
+ *     @max: Maximum number of filters vNIC supports
+ *     @free: Number of free filters available
+ *     @toclean: hash table index to clean next
+ *     @ht_head: hash table list head
+ *     @lock: spin lock
+ *     @rfs_may_expire: timer function for enic_rps_may_expire_flow
+ */
+struct enic_rfs_flw_tbl {
+       u16 max;
+       int free;
+
+#define ENIC_RFS_FLW_BITSHIFT  (10)
+#define ENIC_RFS_FLW_MASK      ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
+       u16 toclean:ENIC_RFS_FLW_BITSHIFT;
+       struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
+       spinlock_t lock;
+       struct timer_list rfs_may_expire;
+};
+
 /* Per-instance private data structure */
 struct enic {
        struct net_device *netdev;
@@ -140,7 +175,7 @@ struct enic {
        unsigned int rq_count;
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
-       struct napi_struct napi[ENIC_RQ_MAX];
+       struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
 
        /* interrupt resource cache line section */
        ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
@@ -150,6 +185,7 @@ struct enic {
        /* completion queue cache line section */
        ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
        unsigned int cq_count;
+       struct enic_rfs_flw_tbl rfs_h;
 };
 
 static inline struct device *enic_get_dev(struct enic *enic)
index e13efbdaa2ed318c2f5c59315f4018ca5a449935..b161f24522b8735af982da6c8264396b3c59c5d8 100644 (file)
@@ -34,13 +34,13 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
        struct vnic_dev *vdev = enic->vdev;
 
        spin_lock(&enic->enic_api_lock);
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
 
        vnic_dev_cmd_proxy_by_index_start(vdev, vf);
        err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
        vnic_dev_cmd_proxy_end(vdev);
 
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
        spin_unlock(&enic->enic_api_lock);
 
        return err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
new file mode 100644 (file)
index 0000000..69dfd3c
--- /dev/null
@@ -0,0 +1,284 @@
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_link.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/flow_keys.h>
+#include "enic_res.h"
+#include "enic_clsf.h"
+
+/* enic_addfltr_5t - Add ipv4 5tuple filter
+ *     @enic: enic struct of vnic
+ *     @keys: flow_keys of ipv4 5tuple
+ *     @rq: rq number to steer to
+ *
+ * This function returns filter_id(hardware_id) of the filter
+ * added. In case of error it returns a negative number.
+ */
+int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
+{
+       int res;
+       struct filter data;
+
+       switch (keys->ip_proto) {
+       case IPPROTO_TCP:
+               data.u.ipv4.protocol = PROTO_TCP;
+               break;
+       case IPPROTO_UDP:
+               data.u.ipv4.protocol = PROTO_UDP;
+               break;
+       default:
+               return -EPROTONOSUPPORT;
+       };
+       data.type = FILTER_IPV4_5TUPLE;
+       data.u.ipv4.src_addr = ntohl(keys->src);
+       data.u.ipv4.dst_addr = ntohl(keys->dst);
+       data.u.ipv4.src_port = ntohs(keys->port16[0]);
+       data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+       data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+
+       spin_lock_bh(&enic->devcmd_lock);
+       res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
+       spin_unlock_bh(&enic->devcmd_lock);
+       res = (res == 0) ? rq : res;
+
+       return res;
+}
+
+/* enic_delfltr - Delete clsf filter
+ *     @enic: enic struct of vnic
+ *	@filter_id: filter_id(hardware_id) of filter to be deleted
+ *
+ * This function returns zero in case of success, negative number in case of
+ * error.
+ */
+int enic_delfltr(struct enic *enic, u16 filter_id)
+{
+       int ret;
+
+       spin_lock_bh(&enic->devcmd_lock);
+       ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
+       spin_unlock_bh(&enic->devcmd_lock);
+
+       return ret;
+}
+
+/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
+ *     @enic: enic data
+ */
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+       int i;
+
+       spin_lock_init(&enic->rfs_h.lock);
+       for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
+               INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
+       enic->rfs_h.max = enic->config.num_arfs;
+       enic->rfs_h.free = enic->rfs_h.max;
+       enic->rfs_h.toclean = 0;
+       enic_rfs_timer_start(enic);
+}
+
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+       int i;
+
+       enic_rfs_timer_stop(enic);
+       spin_lock(&enic->rfs_h.lock);
+       enic->rfs_h.free = 0;
+       for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[i];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       enic_delfltr(enic, n->fltr_id);
+                       hlist_del(&n->node);
+                       kfree(n);
+               }
+       }
+       spin_unlock(&enic->rfs_h.lock);
+}
+
+struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
+{
+       int i;
+
+       for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[i];
+               hlist_for_each_entry_safe(n, tmp, hhead, node)
+                       if (n->fltr_id == fltr_id)
+                               return n;
+       }
+
+       return NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+void enic_flow_may_expire(unsigned long data)
+{
+       struct enic *enic = (struct enic *)data;
+       bool res;
+       int j;
+
+       spin_lock(&enic->rfs_h.lock);
+       for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       res = rps_may_expire_flow(enic->netdev, n->rq_id,
+                                                 n->flow_id, n->fltr_id);
+                       if (res) {
+                               res = enic_delfltr(enic, n->fltr_id);
+                               if (unlikely(res))
+                                       continue;
+                               hlist_del(&n->node);
+                               kfree(n);
+                               enic->rfs_h.free++;
+                       }
+               }
+       }
+       spin_unlock(&enic->rfs_h.lock);
+       mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
+                                                 struct flow_keys *k)
+{
+       struct enic_rfs_fltr_node *tpos;
+
+       hlist_for_each_entry(tpos, h, node)
+               if (tpos->keys.src == k->src &&
+                   tpos->keys.dst == k->dst &&
+                   tpos->keys.ports == k->ports &&
+                   tpos->keys.ip_proto == k->ip_proto &&
+                   tpos->keys.n_proto == k->n_proto)
+                       return tpos;
+       return NULL;
+}
+
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id)
+{
+       struct flow_keys keys;
+       struct enic_rfs_fltr_node *n;
+       struct enic *enic;
+       u16 tbl_idx;
+       int res, i;
+
+       enic = netdev_priv(dev);
+       res = skb_flow_dissect(skb, &keys);
+       if (!res || keys.n_proto != htons(ETH_P_IP) ||
+           (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+               return -EPROTONOSUPPORT;
+
+       tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
+       spin_lock(&enic->rfs_h.lock);
+       n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
+
+       if (n) { /* entry already present  */
+               if (rxq_index == n->rq_id) {
+                       res = -EEXIST;
+                       goto ret_unlock;
+               }
+
+               /* desired rq changed for the flow, we need to delete
+                * old fltr and add new one
+                *
+                * The moment we delete the fltr, the upcoming pkts
+                * are put in default rq based on rss. When we add
+                * new filter, upcoming pkts are put in desired queue.
+                * This could cause ooo pkts.
+                *
+                * Lets 1st try adding new fltr and then del old one.
+                */
+               i = --enic->rfs_h.free;
+               /* clsf tbl is full, we have to del old fltr first*/
+               if (unlikely(i < 0)) {
+                       enic->rfs_h.free++;
+                       res = enic_delfltr(enic, n->fltr_id);
+                       if (unlikely(res < 0))
+                               goto ret_unlock;
+                       res = enic_addfltr_5t(enic, &keys, rxq_index);
+                       if (res < 0) {
+                               hlist_del(&n->node);
+                               enic->rfs_h.free++;
+                               goto ret_unlock;
+                       }
+               /* add new fltr 1st then del old fltr */
+               } else {
+                       int ret;
+
+                       res = enic_addfltr_5t(enic, &keys, rxq_index);
+                       if (res < 0) {
+                               enic->rfs_h.free++;
+                               goto ret_unlock;
+                       }
+                       ret = enic_delfltr(enic, n->fltr_id);
+                       /* deleting old fltr failed. Add old fltr to list.
+                        * enic_flow_may_expire() will try to delete it later.
+                        */
+                       if (unlikely(ret < 0)) {
+                               struct enic_rfs_fltr_node *d;
+                               struct hlist_head *head;
+
+                               head = &enic->rfs_h.ht_head[tbl_idx];
+                               d = kmalloc(sizeof(*d), GFP_ATOMIC);
+                               if (d) {
+                                       d->fltr_id = n->fltr_id;
+                                       INIT_HLIST_NODE(&d->node);
+                                       hlist_add_head(&d->node, head);
+                               }
+                       } else {
+                               enic->rfs_h.free++;
+                       }
+               }
+               n->rq_id = rxq_index;
+               n->fltr_id = res;
+               n->flow_id = flow_id;
+       /* entry not present */
+       } else {
+               i = --enic->rfs_h.free;
+               if (i <= 0) {
+                       enic->rfs_h.free++;
+                       res = -EBUSY;
+                       goto ret_unlock;
+               }
+
+               n = kmalloc(sizeof(*n), GFP_ATOMIC);
+               if (!n) {
+                       res = -ENOMEM;
+                       enic->rfs_h.free++;
+                       goto ret_unlock;
+               }
+
+               res = enic_addfltr_5t(enic, &keys, rxq_index);
+               if (res < 0) {
+                       kfree(n);
+                       enic->rfs_h.free++;
+                       goto ret_unlock;
+               }
+               n->rq_id = rxq_index;
+               n->fltr_id = res;
+               n->flow_id = flow_id;
+               n->keys = keys;
+               INIT_HLIST_NODE(&n->node);
+               hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
+       }
+
+ret_unlock:
+       spin_unlock(&enic->rfs_h.lock);
+       return res;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
new file mode 100644 (file)
index 0000000..6aa9f89
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ENIC_CLSF_H_
+#define _ENIC_CLSF_H_
+
+#include "vnic_dev.h"
+#include "enic.h"
+
+#define ENIC_CLSF_EXPIRE_COUNT 128
+
+int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
+int enic_delfltr(struct enic *enic, u16 filter_id);
+void enic_rfs_flw_tbl_init(struct enic *enic);
+void enic_rfs_flw_tbl_free(struct enic *enic);
+struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
+
+#ifdef CONFIG_RFS_ACCEL
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id);
+void enic_flow_may_expire(unsigned long data);
+
+static inline void enic_rfs_timer_start(struct enic *enic)
+{
+       init_timer(&enic->rfs_h.rfs_may_expire);
+       enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
+       enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+       mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+static inline void enic_rfs_timer_stop(struct enic *enic)
+{
+       del_timer_sync(&enic->rfs_h.rfs_may_expire);
+}
+#else
+static inline void enic_rfs_timer_start(struct enic *enic) {}
+static inline void enic_rfs_timer_stop(struct enic *enic) {}
+#endif /* CONFIG_RFS_ACCEL */
+
+#endif /* _ENIC_CLSF_H_ */
index 3e27df522847def5fd3e2c2634f8edbc98eccf49..87ddc44b590eb679630e0e8363e724a179cb3cde 100644 (file)
@@ -29,9 +29,9 @@ int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_fw_info(enic->vdev, fw_info);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -40,9 +40,9 @@ int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_stats_dump(enic->vdev, vstats);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -54,9 +54,9 @@ int enic_dev_add_station_addr(struct enic *enic)
        if (!is_valid_ether_addr(enic->netdev->dev_addr))
                return -EADDRNOTAVAIL;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -68,9 +68,9 @@ int enic_dev_del_station_addr(struct enic *enic)
        if (!is_valid_ether_addr(enic->netdev->dev_addr))
                return -EADDRNOTAVAIL;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -80,10 +80,10 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_packet_filter(enic->vdev, directed,
                multicast, broadcast, promisc, allmulti);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -92,9 +92,9 @@ int enic_dev_add_addr(struct enic *enic, const u8 *addr)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_add_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -103,9 +103,9 @@ int enic_dev_del_addr(struct enic *enic, const u8 *addr)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_del_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -114,9 +114,9 @@ int enic_dev_notify_unset(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_notify_unset(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -125,9 +125,9 @@ int enic_dev_hang_notify(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_hang_notify(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -136,10 +136,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
                IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -148,9 +148,9 @@ int enic_dev_enable(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable_wait(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -159,9 +159,9 @@ int enic_dev_disable(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_disable(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -170,9 +170,9 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_intr_coal_timer_info(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -181,9 +181,9 @@ int enic_vnic_dev_deinit(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_deinit(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -192,10 +192,10 @@ int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_init_prov2(enic->vdev,
                (u8 *)vp, vic_provinfo_size(vp));
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -204,9 +204,9 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_deinit_done(enic->vdev, status);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -217,9 +217,9 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct enic *enic = netdev_priv(netdev);
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_add_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -230,9 +230,9 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct enic *enic = netdev_priv(netdev);
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_del_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -241,9 +241,9 @@ int enic_dev_enable2(struct enic *enic, int active)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable2(enic->vdev, active);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -252,9 +252,9 @@ int enic_dev_enable2_done(struct enic *enic, int *status)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable2_done(enic->vdev, status);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
index 36ea1ab25f6aa374cfc8ef9c6db4b69112592cb0..10bb970b2f352fb2ba0b23aa4f1374decdd60333 100644 (file)
@@ -28,7 +28,7 @@
  */
 #define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \
        do { \
-               spin_lock(&enic->devcmd_lock); \
+               spin_lock_bh(&enic->devcmd_lock); \
                if (enic_is_valid_vf(enic, vf)) { \
                        vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \
                        err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
@@ -36,7 +36,7 @@
                } else { \
                        err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
                } \
-               spin_unlock(&enic->devcmd_lock); \
+               spin_unlock_bh(&enic->devcmd_lock); \
        } while (0)
 
 int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
index 2e50b5489d204158e89b93dfdb7be09cfba2d8c9..523c9ceb04c020265b8ceabf3fbf209d9db34af6 100644 (file)
@@ -22,6 +22,7 @@
 #include "enic_res.h"
 #include "enic.h"
 #include "enic_dev.h"
+#include "enic_clsf.h"
 
 struct enic_stat {
        char name[ETH_GSTRING_LEN];
@@ -231,7 +232,7 @@ static int enic_set_coalesce(struct net_device *netdev,
                if (ecmd->use_adaptive_rx_coalesce      ||
                    ecmd->rx_coalesce_usecs_low         ||
                    ecmd->rx_coalesce_usecs_high)
-                       return -EOPNOTSUPP;
+                       return -EINVAL;
 
                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -243,34 +244,29 @@ static int enic_set_coalesce(struct net_device *netdev,
                if (ecmd->use_adaptive_rx_coalesce      ||
                    ecmd->rx_coalesce_usecs_low         ||
                    ecmd->rx_coalesce_usecs_high)
-                       return -EOPNOTSUPP;
+                       return -EINVAL;
 
                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        tx_coalesce_usecs);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
+               if (ecmd->rx_coalesce_usecs_high &&
+                   (rx_coalesce_usecs_high <
+                    rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+                               return -EINVAL;
+
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                tx_coalesce_usecs);
                }
 
-               if (rxcoal->use_adaptive_rx_coalesce) {
-                       if (!ecmd->use_adaptive_rx_coalesce) {
-                               rxcoal->use_adaptive_rx_coalesce = 0;
-                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-                       }
-               } else {
-                       if (ecmd->use_adaptive_rx_coalesce)
-                               rxcoal->use_adaptive_rx_coalesce = 1;
-                       else
-                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-               }
+               rxcoal->use_adaptive_rx_coalesce =
+                                       !!ecmd->use_adaptive_rx_coalesce;
+               if (!rxcoal->use_adaptive_rx_coalesce)
+                       enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
 
                if (ecmd->rx_coalesce_usecs_high) {
-                       if (rx_coalesce_usecs_high <
-                           (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-                               return -EINVAL;
                        rxcoal->range_end = rx_coalesce_usecs_high;
                        rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
                        rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
@@ -287,6 +283,102 @@ static int enic_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
+static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       int j, ret = 0, cnt = 0;
+
+       cmd->data = enic->rfs_h.max - enic->rfs_h.free;
+       for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[j];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       if (cnt == cmd->rule_cnt)
+                               return -EMSGSIZE;
+                       rule_locs[cnt] = n->fltr_id;
+                       cnt++;
+               }
+       }
+       cmd->rule_cnt = cnt;
+
+       return ret;
+}
+
+static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct enic_rfs_fltr_node *n;
+
+       n = htbl_fltr_search(enic, (u16)fsp->location);
+       if (!n)
+               return -EINVAL;
+       switch (n->keys.ip_proto) {
+       case IPPROTO_TCP:
+               fsp->flow_type = TCP_V4_FLOW;
+               break;
+       case IPPROTO_UDP:
+               fsp->flow_type = UDP_V4_FLOW;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
+
+       fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+       fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
+
+       fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+       fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
+
+       fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+       fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
+
+       fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+       fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
+
+       fsp->ring_cookie = n->rq_id;
+
+       return 0;
+}
+
+static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                         u32 *rule_locs)
+{
+       struct enic *enic = netdev_priv(dev);
+       int ret = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = enic->rq_count;
+               break;
+       case ETHTOOL_GRXCLSRLCNT:
+               spin_lock_bh(&enic->rfs_h.lock);
+               cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
+               cmd->data = enic->rfs_h.max;
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+               spin_lock_bh(&enic->rfs_h.lock);
+               ret = enic_grxclsrlall(enic, cmd, rule_locs);
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       case ETHTOOL_GRXCLSRULE:
+               spin_lock_bh(&enic->rfs_h.lock);
+               ret = enic_grxclsrule(enic, cmd);
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
 static const struct ethtool_ops enic_ethtool_ops = {
        .get_settings = enic_get_settings,
        .get_drvinfo = enic_get_drvinfo,
@@ -298,6 +390,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
        .get_ethtool_stats = enic_get_ethtool_stats,
        .get_coalesce = enic_get_coalesce,
        .set_coalesce = enic_set_coalesce,
+       .get_rxnfc = enic_get_rxnfc,
 };
 
 void enic_set_ethtool_ops(struct net_device *netdev)
index f32f828b7f3dc31490179210f86c72c5968d84f7..9348febc0743a477189ed36581e455a694b3d4d3 100644 (file)
 #include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 #include <linux/ktime.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -49,6 +55,7 @@
 #include "enic.h"
 #include "enic_dev.h"
 #include "enic_pp.h"
+#include "enic_clsf.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
@@ -309,40 +316,15 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+static irqreturn_t enic_isr_msix(int irq, void *data)
 {
        struct napi_struct *napi = data;
 
-       /* schedule NAPI polling for RQ cleanup */
        napi_schedule(napi);
 
        return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_wq(int irq, void *data)
-{
-       struct enic *enic = data;
-       unsigned int cq;
-       unsigned int intr;
-       unsigned int wq_work_to_do = -1; /* no limit */
-       unsigned int wq_work_done;
-       unsigned int wq_irq;
-
-       wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
-       cq = enic_cq_wq(enic, wq_irq);
-       intr = enic_msix_wq_intr(enic, wq_irq);
-
-       wq_work_done = vnic_cq_service(&enic->cq[cq],
-               wq_work_to_do, enic_wq_service, NULL);
-
-       vnic_intr_return_credits(&enic->intr[intr],
-               wq_work_done,
-               1 /* unmask intr */,
-               1 /* reset intr timer */);
-
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t enic_isr_msix_err(int irq, void *data)
 {
        struct enic *enic = data;
@@ -1049,10 +1031,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 
-               if (netdev->features & NETIF_F_GRO)
-                       napi_gro_receive(&enic->napi[q_number], skb);
-               else
+               skb_mark_napi_id(skb, &enic->napi[rq->index]);
+               if (enic_poll_busy_polling(rq) ||
+                   !(netdev->features & NETIF_F_GRO))
                        netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&enic->napi[q_number], skb);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_intr_update_pkt_size(&cq->pkt_size_counter,
                                                  bytes_written);
@@ -1089,16 +1073,22 @@ static int enic_poll(struct napi_struct *napi, int budget)
        unsigned int  work_done, rq_work_done = 0, wq_work_done;
        int err;
 
-       /* Service RQ (first) and WQ
-        */
+       wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+                                      enic_wq_service, NULL);
+
+       if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+               if (wq_work_done > 0)
+                       vnic_intr_return_credits(&enic->intr[intr],
+                                                wq_work_done,
+                                                0 /* dont unmask intr */,
+                                                0 /* dont reset intr timer */);
+               return rq_work_done;
+       }
 
        if (budget > 0)
                rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                        rq_work_to_do, enic_rq_service, NULL);
 
-       wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
-               wq_work_to_do, enic_wq_service, NULL);
-
        /* Accumulate intr event credits for this polling
         * cycle.  An intr event is the completion of a
         * a WQ or RQ packet.
@@ -1130,6 +1120,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }
+       enic_poll_unlock_napi(&enic->rq[cq_rq]);
 
        return rq_work_done;
 }
@@ -1192,7 +1183,102 @@ static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
        pkt_size_counter->small_pkt_bytes_cnt = 0;
 }
 
-static int enic_poll_msix(struct napi_struct *napi, int budget)
+#ifdef CONFIG_RFS_ACCEL
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+       free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
+       enic->netdev->rx_cpu_rmap = NULL;
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+       int i, res;
+
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
+               enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
+               if (unlikely(!enic->netdev->rx_cpu_rmap))
+                       return;
+               for (i = 0; i < enic->rq_count; i++) {
+                       res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
+                                              enic->msix_entry[i].vector);
+                       if (unlikely(res)) {
+                               enic_free_rx_cpu_rmap(enic);
+                               return;
+                       }
+               }
+       }
+}
+
+#else
+
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int enic_busy_poll(struct napi_struct *napi)
+{
+       struct net_device *netdev = napi->dev;
+       struct enic *enic = netdev_priv(netdev);
+       unsigned int rq = (napi - &enic->napi[0]);
+       unsigned int cq = enic_cq_rq(enic, rq);
+       unsigned int intr = enic_msix_rq_intr(enic, rq);
+       unsigned int work_to_do = -1; /* clean all pkts possible */
+       unsigned int work_done;
+
+       if (!enic_poll_lock_poll(&enic->rq[rq]))
+               return LL_FLUSH_BUSY;
+       work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
+                                   enic_rq_service, NULL);
+
+       if (work_done > 0)
+               vnic_intr_return_credits(&enic->intr[intr],
+                                        work_done, 0, 0);
+       vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               enic_calc_int_moderation(enic, &enic->rq[rq]);
+       enic_poll_unlock_poll(&enic->rq[rq]);
+
+       return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
+{
+       struct net_device *netdev = napi->dev;
+       struct enic *enic = netdev_priv(netdev);
+       unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
+       struct vnic_wq *wq = &enic->wq[wq_index];
+       unsigned int cq;
+       unsigned int intr;
+       unsigned int wq_work_to_do = -1; /* clean all desc possible */
+       unsigned int wq_work_done;
+       unsigned int wq_irq;
+
+       wq_irq = wq->index;
+       cq = enic_cq_wq(enic, wq_irq);
+       intr = enic_msix_wq_intr(enic, wq_irq);
+       wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
+                                      enic_wq_service, NULL);
+
+       vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
+                                0 /* don't unmask intr */,
+                                1 /* reset intr timer */);
+       if (!wq_work_done) {
+               napi_complete(napi);
+               vnic_intr_unmask(&enic->intr[intr]);
+       }
+
+       return 0;
+}
+
+static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
@@ -1203,6 +1289,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
        unsigned int work_done = 0;
        int err;
 
+       if (!enic_poll_lock_napi(&enic->rq[rq]))
+               return work_done;
        /* Service RQ
         */
 
@@ -1248,6 +1336,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
+       enic_poll_unlock_napi(&enic->rq[rq]);
 
        return work_done;
 }
@@ -1267,6 +1356,7 @@ static void enic_free_intr(struct enic *enic)
        struct net_device *netdev = enic->netdev;
        unsigned int i;
 
+       enic_free_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                free_irq(enic->pdev->irq, netdev);
@@ -1291,6 +1381,7 @@ static int enic_request_intr(struct enic *enic)
        unsigned int i, intr;
        int err = 0;
 
+       enic_set_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
 
        case VNIC_DEV_INTR_MODE_INTX:
@@ -1312,17 +1403,19 @@ static int enic_request_intr(struct enic *enic)
                        snprintf(enic->msix[intr].devname,
                                sizeof(enic->msix[intr].devname),
                                "%.11s-rx-%d", netdev->name, i);
-                       enic->msix[intr].isr = enic_isr_msix_rq;
+                       enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[i];
                }
 
                for (i = 0; i < enic->wq_count; i++) {
+                       int wq = enic_cq_wq(enic, i);
+
                        intr = enic_msix_wq_intr(enic, i);
                        snprintf(enic->msix[intr].devname,
                                sizeof(enic->msix[intr].devname),
                                "%.11s-tx-%d", netdev->name, i);
-                       enic->msix[intr].isr = enic_isr_msix_wq;
-                       enic->msix[intr].devid = enic;
+                       enic->msix[intr].isr = enic_isr_msix;
+                       enic->msix[intr].devid = &enic->napi[wq];
                }
 
                intr = enic_msix_err_intr(enic);
@@ -1421,7 +1514,7 @@ static int enic_dev_notify_set(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                err = vnic_dev_notify_set(enic->vdev,
@@ -1435,7 +1528,7 @@ static int enic_dev_notify_set(struct enic *enic)
                err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
                break;
        }
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -1494,15 +1587,20 @@ static int enic_open(struct net_device *netdev)
 
        netif_tx_wake_all_queues(netdev);
 
-       for (i = 0; i < enic->rq_count; i++)
+       for (i = 0; i < enic->rq_count; i++) {
+               enic_busy_poll_init_lock(&enic->rq[i]);
                napi_enable(&enic->napi[i]);
-
+       }
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
        enic_dev_enable(enic);
 
        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
+       enic_rfs_flw_tbl_init(enic);
 
        return 0;
 
@@ -1529,14 +1627,23 @@ static int enic_stop(struct net_device *netdev)
        enic_synchronize_irqs(enic);
 
        del_timer_sync(&enic->notify_timer);
+       enic_rfs_flw_tbl_free(enic);
 
        enic_dev_disable(enic);
 
-       for (i = 0; i < enic->rq_count; i++)
+       local_bh_disable();
+       for (i = 0; i < enic->rq_count; i++) {
                napi_disable(&enic->napi[i]);
+               while (!enic_poll_lock_napi(&enic->rq[i]))
+                       mdelay(1);
+       }
+       local_bh_enable();
 
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
 
        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_del_station_addr(enic);
@@ -1656,13 +1763,14 @@ static void enic_poll_controller(struct net_device *netdev)
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
-                       enic_isr_msix_rq(enic->msix_entry[intr].vector,
-                               &enic->napi[i]);
+                       enic_isr_msix(enic->msix_entry[intr].vector,
+                                     &enic->napi[i]);
                }
 
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
-                       enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+                       enic_isr_msix(enic->msix_entry[intr].vector,
+                                     &enic->napi[enic_cq_wq(enic, i)]);
                }
 
                break;
@@ -1758,11 +1866,11 @@ static int enic_set_rsskey(struct enic *enic)
 
        memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_rss_key(enic,
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
                rss_key_buf_va, rss_key_buf_pa);
@@ -1785,11 +1893,11 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
        for (i = 0; i < (1 << rss_hash_bits); i++)
                (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);
@@ -1807,13 +1915,13 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
        /* Enable VLAN tag stripping.
        */
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_nic_cfg(enic,
                rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu,
                rss_enable, tso_ipid_split_en,
                ig_vlan_strip_en);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -2021,6 +2129,12 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = enic_busy_poll,
+#endif
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2041,14 +2155,25 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = enic_busy_poll,
+#endif
 };
 
 static void enic_dev_deinit(struct enic *enic)
 {
        unsigned int i;
 
-       for (i = 0; i < enic->rq_count; i++)
+       for (i = 0; i < enic->rq_count; i++) {
+               napi_hash_del(&enic->napi[i]);
                netif_napi_del(&enic->napi[i]);
+       }
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
 
        enic_free_vnic_resources(enic);
        enic_clear_intr_mode(enic);
@@ -2114,11 +2239,17 @@ static int enic_dev_init(struct enic *enic)
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        default:
                netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+               napi_hash_add(&enic->napi[0]);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
-               for (i = 0; i < enic->rq_count; i++)
+               for (i = 0; i < enic->rq_count; i++) {
                        netif_napi_add(netdev, &enic->napi[i],
-                               enic_poll_msix, 64);
+                               enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+                       napi_hash_add(&enic->napi[i]);
+               }
+               for (i = 0; i < enic->wq_count; i++)
+                       netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
+                                      enic_poll_msix_wq, NAPI_POLL_WEIGHT);
                break;
        }
 
@@ -2386,6 +2517,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->features |= netdev->hw_features;
 
+#ifdef CONFIG_RFS_ACCEL
+       netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+
        if (using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
index 31d658880c3cfae3cc5a8e8935a68722e8e95ab6..9c96911fb2c8a7dc0bfb2df8e2c9398c15aa4132 100644 (file)
@@ -71,6 +71,7 @@ int enic_get_vnic_config(struct enic *enic)
        GET_CONFIG(intr_mode);
        GET_CONFIG(intr_timer_usec);
        GET_CONFIG(loop_tag);
+       GET_CONFIG(num_arfs);
 
        c->wq_desc_count =
                min_t(u32, ENIC_MAX_WQ_DESCS,
index e86a45cb9e682980b25e5f6ebfe1772afb3a377b..5abc496bcf29e64d3106b6451739dbdda6a4e7cf 100644 (file)
@@ -312,12 +312,12 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                                err = (int)readq(&devcmd->args[0]);
                                if (err == ERR_EINVAL &&
                                    cmd == CMD_CAPABILITY)
-                                       return err;
+                                       return -err;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
                                        pr_err("Error %d devcmd %d\n",
                                                err, _CMD_N(cmd));
-                               return err;
+                               return -err;
                        }
 
                        if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
@@ -1048,3 +1048,64 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 
        return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
 }
+
+/* vnic_dev_classifier: Add/Delete classifier entries
+ * @vdev: vdev of the device
+ * @cmd: CLSF_ADD for Add filter
+ *      CLSF_DEL for Delete filter
+ * @entry: In case of ADD filter, the caller passes the RQ number in this
+ *        variable.
+ *
+ *        This function stores the filter_id returned by the firmware in the
+ *        same variable before return;
+ *
+ *        In case of DEL filter, the caller passes the RQ number. Return
+ *        value is irrelevant.
+ * @data: filter data
+ */
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+                       struct filter *data)
+{
+       u64 a0, a1;
+       int wait = 1000;
+       dma_addr_t tlv_pa;
+       int ret = -EINVAL;
+       struct filter_tlv *tlv, *tlv_va;
+       struct filter_action *action;
+       u64 tlv_size;
+
+       if (cmd == CLSF_ADD) {
+               tlv_size = sizeof(struct filter) +
+                          sizeof(struct filter_action) +
+                          2 * sizeof(struct filter_tlv);
+               tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
+               if (!tlv_va)
+                       return -ENOMEM;
+               tlv = tlv_va;
+               a0 = tlv_pa;
+               a1 = tlv_size;
+               memset(tlv, 0, tlv_size);
+               tlv->type = CLSF_TLV_FILTER;
+               tlv->length = sizeof(struct filter);
+               *(struct filter *)&tlv->val = *data;
+
+               tlv = (struct filter_tlv *)((char *)tlv +
+                                           sizeof(struct filter_tlv) +
+                                           sizeof(struct filter));
+
+               tlv->type = CLSF_TLV_ACTION;
+               tlv->length = sizeof(struct filter_action);
+               action = (struct filter_action *)&tlv->val;
+               action->type = FILTER_ACTION_RQ_STEERING;
+               action->u.rq_idx = *entry;
+
+               ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+               *entry = (u16)a0;
+               pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+       } else if (cmd == CLSF_DEL) {
+               a0 = *entry;
+               ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
+       }
+
+       return ret;
+}
index 1f3b301f822517d89ea881014b56152d3110c918..1fb214efcebaf0bb959dc62a0871f792cd2b4e43 100644 (file)
@@ -133,5 +133,7 @@ int vnic_dev_enable2(struct vnic_dev *vdev, int active);
 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+                       struct filter *data);
 
 #endif /* _VNIC_DEV_H_ */
index b9a0d78fd6391698a15a169cf206ec27b098b2c0..435d0cd96c224c5c8b6a5c8db6498d41458b8cf6 100644 (file)
@@ -603,6 +603,11 @@ struct filter_tlv {
        u_int32_t val[0];
 };
 
+enum {
+       CLSF_ADD = 0,
+       CLSF_DEL = 1,
+};
+
 /*
  * Writing cmd register causes STAT_BUSY to get set in status register.
  * When cmd completes, STAT_BUSY will be cleared.
index 609542848e029de6289051ba198e39e7a2e58291..75aced2de86987b6a96205ca6d079f4d5cfd77be 100644 (file)
@@ -32,6 +32,8 @@ struct vnic_enet_config {
        char devname[16];
        u32 intr_timer_usec;
        u16 loop_tag;
+       u16 vf_rq_count;
+       u16 num_arfs;
 };
 
 #define VENETF_TSO             0x1     /* TSO enabled */
index ee7bc95af278c691acebb358341a9885017863f3..8111d5202df2f38c26a8c241a7bb1c1e7cda8228 100644 (file)
@@ -85,6 +85,21 @@ struct vnic_rq {
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define ENIC_POLL_STATE_IDLE           0
+#define ENIC_POLL_STATE_NAPI           (1 << 0) /* NAPI owns this poll */
+#define ENIC_POLL_STATE_POLL           (1 << 1) /* poll owns this poll */
+#define ENIC_POLL_STATE_NAPI_YIELD     (1 << 2) /* NAPI yielded this poll */
+#define ENIC_POLL_STATE_POLL_YIELD     (1 << 3) /* poll yielded this poll */
+#define ENIC_POLL_YIELD                        (ENIC_POLL_STATE_NAPI_YIELD |   \
+                                        ENIC_POLL_STATE_POLL_YIELD)
+#define ENIC_POLL_LOCKED               (ENIC_POLL_STATE_NAPI |         \
+                                        ENIC_POLL_STATE_POLL)
+#define ENIC_POLL_USER_PEND            (ENIC_POLL_STATE_POLL |         \
+                                        ENIC_POLL_STATE_POLL_YIELD)
+       unsigned int bpoll_state;
+       spinlock_t bpoll_lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -197,6 +212,113 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
        return 0;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+       spin_lock_init(&rq->bpoll_lock);
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+       bool rc = true;
+
+       spin_lock(&rq->bpoll_lock);
+       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+               WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+               rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               rq->bpoll_state = ENIC_POLL_STATE_NAPI;
+       }
+       spin_unlock(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+       bool rc = false;
+
+       spin_lock(&rq->bpoll_lock);
+       WARN_ON(rq->bpoll_state &
+               (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
+       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+               rc = true;
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+       spin_unlock(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+       bool rc = true;
+
+       spin_lock_bh(&rq->bpoll_lock);
+       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+               rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               rq->bpoll_state |= ENIC_POLL_STATE_POLL;
+       }
+       spin_unlock_bh(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+       bool rc = false;
+
+       spin_lock_bh(&rq->bpoll_lock);
+       WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+               rc = true;
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+       spin_unlock_bh(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
+{
+       WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
+       return rq->bpoll_state & ENIC_POLL_USER_PEND;
+}
+
+#else
+
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+       return true;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_ll_polling(struct vnic_rq *rq)
+{
+       return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 void vnic_rq_free(struct vnic_rq *rq);
 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
index c05b66dfcc30bc9c8e6f300f3d1c47300cd73808..7091fa6ed09685bc226108e1213f8d19168c3117 100644 (file)
@@ -3250,7 +3250,6 @@ srom_map_media(struct net_device *dev)
        printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
                                                          lp->infoblock_media);
        return -1;
-       break;
     }
 
     return 0;
index c2f5d2d3b9324269edd1dc7d18e3d5355f209faf..0048ef8e5f358aea61d07639f9f2654002ef5f52 100644 (file)
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.2u"
+#define DRV_VER                        "10.4u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -372,6 +372,7 @@ enum vf_state {
 };
 
 #define BE_FLAGS_LINK_STATUS_INIT              1
+#define BE_FLAGS_SRIOV_ENABLED                 (1 << 2)
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
 #define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
 #define BE_FLAGS_MCAST_PROMISC                 (1 << 5)
@@ -411,6 +412,7 @@ struct be_resources {
        u16 max_vlans;          /* Number of vlans supported */
        u16 max_evt_qs;
        u32 if_cap_flags;
+       u32 vf_if_cap_flags;    /* VF if capability flags */
 };
 
 struct rss_info {
@@ -500,6 +502,7 @@ struct be_adapter {
        u32 flash_status;
        struct completion et_cmd_compl;
 
+       struct be_resources pool_res;   /* resources available for the port */
        struct be_resources res;        /* resources available for the func */
        u16 num_vfs;                    /* Number of VFs provisioned by PF */
        u8 virtfn;
@@ -523,9 +526,9 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define be_virtfn(adapter)             (adapter->virtfn)
-#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_physfn(adapter) && \
-                                        (num_vfs || pci_num_vf(adapter->pdev)))
+#define sriov_enabled(adapter)         (adapter->flags &       \
+                                        BE_FLAGS_SRIOV_ENABLED)
+
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
@@ -536,7 +539,7 @@ struct be_adapter {
 #define be_max_vlans(adapter)          (adapter->res.max_vlans)
 #define be_max_uc(adapter)             (adapter->res.max_uc_mac)
 #define be_max_mc(adapter)             (adapter->res.max_mcast_mac)
-#define be_max_vfs(adapter)            (adapter->res.max_vfs)
+#define be_max_vfs(adapter)            (adapter->pool_res.max_vfs)
 #define be_max_rss(adapter)            (adapter->res.max_rss_qs)
 #define be_max_txqs(adapter)           (adapter->res.max_tx_qs)
 #define be_max_prio_txqs(adapter)      (adapter->res.max_prio_tx_qs)
@@ -671,6 +674,8 @@ static inline void swap_dws(void *wrb, int len)
 #endif                         /* __BIG_ENDIAN */
 }
 
+#define be_cmd_status(status)          (status > 0 ? -EIO : status)
+
 static inline u8 is_tcp_pkt(struct sk_buff *skb)
 {
        u8 val = 0;
index f4ea3490f44657f3e90974065fea9a0eee649cd0..791094c33535d845bd68c09cf1ac4e651ec4b8a6 100644 (file)
@@ -1749,8 +1749,7 @@ err:
 }
 
 /* Uses synchronous mcc */
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                     char *fw_on_flash)
+int be_cmd_get_fw_ver(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
@@ -1772,9 +1771,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
-               strcpy(fw_ver, resp->firmware_version_string);
-               if (fw_on_flash)
-                       strcpy(fw_on_flash, resp->fw_on_flash_version_string);
+               strcpy(adapter->fw_ver, resp->firmware_version_string);
+               strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
        }
 err:
        spin_unlock_bh(&adapter->mcc_lock);
@@ -1997,8 +1995,7 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
-                       u32 *mode, u32 *caps, u16 *asic_rev)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
@@ -2017,10 +2014,10 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
-               *port_num = le32_to_cpu(resp->phys_port);
-               *mode = le32_to_cpu(resp->function_mode);
-               *caps = le32_to_cpu(resp->function_caps);
-               *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
+               adapter->port_num = le32_to_cpu(resp->phys_port);
+               adapter->function_mode = le32_to_cpu(resp->function_mode);
+               adapter->function_caps = le32_to_cpu(resp->function_caps);
+               adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
        }
 
        mutex_unlock(&adapter->mbox_lock);
@@ -2224,7 +2221,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(60000)))
-               status = -1;
+               status = -ETIMEDOUT;
        else
                status = adapter->flash_status;
 
@@ -2320,7 +2317,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(40000)))
-               status = -1;
+               status = -ETIMEDOUT;
        else
                status = adapter->flash_status;
 
@@ -3313,15 +3310,28 @@ err:
        return status;
 }
 
-static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
+/* Descriptor type */
+enum {
+       FUNC_DESC = 1,
+       VFT_DESC = 2
+};
+
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+                                              int desc_type)
 {
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+       struct be_nic_res_desc *nic;
        int i;
 
        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
-                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-                       return (struct be_nic_res_desc *)hdr;
+                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
+                       nic = (struct be_nic_res_desc *)hdr;
+                       if (desc_type == FUNC_DESC ||
+                           (desc_type == VFT_DESC &&
+                            nic->flags & (1 << VFT_SHIFT)))
+                               return nic;
+               }
 
                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
@@ -3329,6 +3339,16 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
        return NULL;
 }
 
+static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, VFT_DESC);
+}
+
+static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, FUNC_DESC);
+}
+
 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
                                                 u32 desc_count)
 {
@@ -3424,7 +3444,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
                u32 desc_count = le32_to_cpu(resp->desc_count);
                struct be_nic_res_desc *desc;
 
-               desc = be_get_nic_desc(resp->func_param, desc_count);
+               desc = be_get_func_nic_desc(resp->func_param, desc_count);
                if (!desc) {
                        status = -EINVAL;
                        goto err;
@@ -3440,76 +3460,17 @@ err:
        return status;
 }
 
-/* Uses mbox */
-static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
-       wrb = wrb_from_mbox(adapter);
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mbox_notify_wait(adapter);
-
-       mutex_unlock(&adapter->mbox_lock);
-       return status;
-}
-
-/* Uses sync mcc */
-static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mcc_notify_wait(adapter);
-
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
-       return status;
-}
-
-/* Uses sync mcc, if MCCQ is already created otherwise mbox */
+/* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
+       struct be_cmd_req_get_profile_config *req;
+       struct be_nic_res_desc *vf_res;
        struct be_pcie_res_desc *pcie;
        struct be_port_res_desc *port;
        struct be_nic_res_desc *nic;
-       struct be_queue_info *mccq = &adapter->mcc_obj.q;
+       struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
        u32 desc_count;
        int status;
@@ -3520,10 +3481,17 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (!cmd.va)
                return -ENOMEM;
 
-       if (!mccq->created)
-               status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
-       else
-               status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
+       req = cmd.va;
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_GET_PROFILE_CONFIG,
+                              cmd.size, &wrb, &cmd);
+
+       req->hdr.domain = domain;
+       if (!lancer_chip(adapter))
+               req->hdr.version = 1;
+       req->type = ACTIVE_PROFILE_TYPE;
+
+       status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
@@ -3539,48 +3507,52 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (port)
                adapter->mc_type = port->mc_type;
 
-       nic = be_get_nic_desc(resp->func_param, desc_count);
+       nic = be_get_func_nic_desc(resp->func_param, desc_count);
        if (nic)
                be_copy_nic_desc(res, nic);
 
+       vf_res = be_get_vft_desc(resp->func_param, desc_count);
+       if (vf_res)
+               res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
                pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain)
+/* Will use MBOX only if MCCQ has not been created */
+static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+                                    int size, int count, u8 version, u8 domain)
 {
        struct be_cmd_req_set_profile_config *req;
-       struct be_mcc_wrb *wrb;
+       struct be_mcc_wrb wrb = {0};
+       struct be_dma_mem cmd;
        int status;
 
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
+       memset(&cmd, 0, sizeof(struct be_dma_mem));
+       cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       if (!cmd.va)
+               return -ENOMEM;
 
-       req = embedded_payload(wrb);
+       req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
-                              wrb, NULL);
+                              OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
+                              &wrb, &cmd);
        req->hdr.version = version;
        req->hdr.domain = domain;
-       req->desc_count = cpu_to_le32(1);
+       req->desc_count = cpu_to_le32(count);
        memcpy(req->desc, desc, size);
 
-       status = be_mcc_notify_wait(adapter);
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
+       status = be_cmd_notify_wait(adapter, &wrb);
+
+       if (cmd.va)
+               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
 /* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
 {
        memset(nic, 0, sizeof(*nic));
        nic->unicast_mac_count = 0xFFFF;
@@ -3601,9 +3573,20 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
        nic->wol_param = 0x0F;
        nic->tunnel_iface_count = 0xFFFF;
        nic->direct_tenant_iface_count = 0xFFFF;
+       nic->bw_min = 0xFFFFFFFF;
        nic->bw_max = 0xFFFFFFFF;
 }
 
+/* Mark all fields invalid */
+static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
+{
+       memset(pcie, 0, sizeof(*pcie));
+       pcie->sriov_state = 0xFF;
+       pcie->pf_state = 0xFF;
+       pcie->pf_type = 0xFF;
+       pcie->num_vfs = 0xFFFF;
+}
+
 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                      u8 domain)
 {
@@ -3634,7 +3617,63 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
 
        return be_cmd_set_profile_config(adapter, &nic_desc,
                                         nic_desc.hdr.desc_len,
-                                        version, domain);
+                                        1, version, domain);
+}
+
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs)
+{
+       struct {
+               struct be_pcie_res_desc pcie;
+               struct be_nic_res_desc nic_vft;
+       } __packed desc;
+       u16 vf_q_count;
+
+       if (BEx_chip(adapter) || lancer_chip(adapter))
+               return 0;
+
+       /* PF PCIE descriptor */
+       be_reset_pcie_desc(&desc.pcie);
+       desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
+       desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.pf_num = adapter->pdev->devfn;
+       desc.pcie.sriov_state = num_vfs ? 1 : 0;
+       desc.pcie.num_vfs = cpu_to_le16(num_vfs);
+
+       /* VF NIC Template descriptor */
+       be_reset_nic_desc(&desc.nic_vft);
+       desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+       desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
+                               (1 << NOSV_SHIFT);
+       desc.nic_vft.pf_num = adapter->pdev->devfn;
+       desc.nic_vft.vf_num = 0;
+
+       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               /* If number of VFs requested is 8 less than max supported,
+                * assign 8 queue pairs to the PF and divide the remaining
+                * resources evenly among the VFs
+                */
+               if (num_vfs < (be_max_vfs(adapter) - 8))
+                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
+               else
+                       vf_q_count = res.max_rss_qs / num_vfs;
+
+               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
+               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
+       } else {
+               desc.nic_vft.txq_count = cpu_to_le16(1);
+               desc.nic_vft.rq_count = cpu_to_le16(1);
+               desc.nic_vft.rssq_count = cpu_to_le16(0);
+               /* One CQ for each TX, RX and MCCQ */
+               desc.nic_vft.cq_count = cpu_to_le16(3);
+       }
+
+       return be_cmd_set_profile_config(adapter, &desc,
+                                        2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3686,7 +3725,7 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
        }
 
        return be_cmd_set_profile_config(adapter, &port_desc,
-                                        RESOURCE_DESC_SIZE_V1, 1, 0);
+                                        RESOURCE_DESC_SIZE_V1, 1, 1, 0);
 }
 
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
index 59b3c056f3297bae3a101192943a944772f3d5c7..03e8a15c69225c7bbef9db3d36ed4f0960885d7b 100644 (file)
@@ -1081,11 +1081,6 @@ struct be_cmd_req_modify_eq_delay {
        struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
-struct be_cmd_resp_modify_eq_delay {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd0;
-} __packed;
-
 /******************** Get FW Config *******************/
 /* The HW can come up in either of the following multi-channel modes
  * based on the skew/IPL.
@@ -1156,11 +1151,6 @@ struct be_cmd_req_enable_disable_beacon {
        u8  status_duration;
 } __packed;
 
-struct be_cmd_resp_enable_disable_beacon {
-       struct be_cmd_resp_hdr resp_hdr;
-       u32 rsvd0;
-} __packed;
-
 struct be_cmd_req_get_beacon_state {
        struct be_cmd_req_hdr hdr;
        u8  port_num;
@@ -1326,11 +1316,6 @@ struct be_cmd_req_set_lmode {
        u8 loopback_state;
 };
 
-struct be_cmd_resp_set_lmode {
-       struct be_cmd_resp_hdr resp_hdr;
-       u8 rsvd0[4];
-};
-
 /********************** DDR DMA test *********************/
 struct be_cmd_req_ddrdma_test {
        struct be_cmd_req_hdr hdr;
@@ -1434,11 +1419,6 @@ struct be_cmd_req_set_qos {
        u32 rsvd[7];
 };
 
-struct be_cmd_resp_set_qos {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd;
-};
-
 /*********************** Controller Attributes ***********************/
 struct be_cmd_req_cntl_attribs {
        struct be_cmd_req_hdr hdr;
@@ -1572,11 +1552,6 @@ struct be_cmd_req_set_hsw_config {
        u8 context[sizeof(struct amap_set_hsw_context) / 8];
 } __packed;
 
-struct be_cmd_resp_set_hsw_config {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd;
-};
-
 struct amap_get_hsw_req_context {
        u8 interface_id[16];
        u8 rsvd0[14];
@@ -1835,6 +1810,7 @@ struct be_cmd_req_set_ext_fat_caps {
 #define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
+#define VFT_SHIFT                              3       /* VF template */
 #define IMM_SHIFT                              6       /* Immediate */
 #define NOSV_SHIFT                             7       /* No save */
 
@@ -1962,12 +1938,8 @@ struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
        u32 desc_count;
-       u8 desc[RESOURCE_DESC_SIZE_V1];
-};
-
-struct be_cmd_resp_set_profile_config {
-       struct be_cmd_resp_hdr hdr;
-};
+       u8 desc[2 * RESOURCE_DESC_SIZE_V1];
+} __packed;
 
 struct be_cmd_req_get_active_profile {
        struct be_cmd_req_hdr hdr;
@@ -2070,16 +2042,14 @@ int be_cmd_reset(struct be_adapter *adapter);
 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                               struct be_dma_mem *nonemb_cmd);
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                     char *fw_on_flash);
+int be_cmd_get_fw_ver(struct be_adapter *adapter);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                       u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
-                       u32 *function_mode, u32 *function_caps, u16 *asic_rev);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter);
 int be_cmd_reset_function(struct be_adapter *adapter);
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
                      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
@@ -2157,8 +2127,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
@@ -2168,3 +2136,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                          int link_state, u8 domain);
 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs);
index e2da4d20dd3de7441f8137f1be8a9ecdf5cec615..25f516d6eb9e597129bd4715e4046fd5b486e04b 100644 (file)
@@ -643,7 +643,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
        if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static int be_set_phys_id(struct net_device *netdev,
@@ -762,7 +762,7 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 err:
        dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
                          ddrdma_cmd.dma);
-       return ret;
+       return be_cmd_status(ret);
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
@@ -885,7 +885,7 @@ static int be_read_eeprom(struct net_device *netdev,
        dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
                          eeprom_cmd.dma);
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static u32 be_get_msg_level(struct net_device *netdev)
@@ -1042,7 +1042,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
        if (!status)
                adapter->rss_info.rss_flags = rss_flags;
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
@@ -1080,6 +1080,7 @@ static int be_set_channels(struct net_device  *netdev,
                           struct ethtool_channels *ch)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status;
 
        if (ch->rx_count || ch->tx_count || ch->other_count ||
            !ch->combined_count || ch->combined_count > be_max_qs(adapter))
@@ -1087,7 +1088,8 @@ static int be_set_channels(struct net_device  *netdev,
 
        adapter->cfg_num_qs = ch->combined_count;
 
-       return be_update_queues(adapter);
+       status = be_update_queues(adapter);
+       return be_cmd_status(status);
 }
 
 static u32 be_get_rxfh_indir_size(struct net_device *netdev)
index 1e187fb760f80fce4099f779ee44abafc10b22f2..9c50814f1e952b05188beb619bca2e525d7354c4 100644 (file)
@@ -81,10 +81,10 @@ static const char * const ue_status_low_desc[] = {
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
-       "AXGMAC0",
-       "AXGMAC1",
-       "JTAG",
-       "MPU_INTPEND"
+       "ERX2 ",
+       "SPARE ",
+       "JTAG ",
+       "MPU_INTPEND "
 };
 /* UE Status High CSR */
 static const char * const ue_status_hi_desc[] = {
@@ -109,16 +109,16 @@ static const char * const ue_status_hi_desc[] = {
        "HOST5",
        "HOST6",
        "HOST7",
-       "HOST8",
-       "HOST9",
+       "ECRC",
+       "Poison TLP",
        "NETC",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
+       "PERIPH",
+       "LLTXULP",
+       "D2P",
+       "RCON",
+       "LDMA",
+       "LLTXP",
+       "LLTXPB",
        "Unknown"
 };
 
@@ -1172,20 +1172,15 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       int status = 0;
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
-               goto ret;
+               return 0;
 
        clear_bit(vid, adapter->vids);
-       status = be_vid_config(adapter);
-       if (!status)
-               adapter->vlans_added--;
-       else
-               set_bit(vid, adapter->vids);
-ret:
-       return status;
+       adapter->vlans_added--;
+
+       return be_vid_config(adapter);
 }
 
 static void be_clear_promisc(struct be_adapter *adapter)
@@ -1286,13 +1281,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
                                        vf + 1);
        }
 
-       if (status)
-               dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
-                       mac, vf);
-       else
-               memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+       if (status) {
+               dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
+                       mac, vf, status);
+               return be_cmd_status(status);
+       }
 
-       return status;
+       ether_addr_copy(vf_cfg->mac_addr, mac);
+
+       return 0;
 }
 
 static int be_get_vf_config(struct net_device *netdev, int vf,
@@ -1341,12 +1338,16 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
                                               vf + 1, vf_cfg->if_handle, 0);
        }
 
-       if (!status)
-               vf_cfg->vlan_tag = vlan;
-       else
-               dev_info(&adapter->pdev->dev,
-                        "VLAN %d config on VF %d failed\n", vlan, vf);
-       return status;
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "VLAN %d config on VF %d failed : %#x\n", vlan,
+                       vf, status);
+               return be_cmd_status(status);
+       }
+
+       vf_cfg->vlan_tag = vlan;
+
+       return 0;
 }
 
 static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
@@ -1377,7 +1378,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
 
        if (!link_status) {
                dev_err(dev, "TX-rate setting not allowed when link is down\n");
-               status = -EPERM;
+               status = -ENETDOWN;
                goto err;
        }
 
@@ -1408,7 +1409,7 @@ config_qos:
 err:
        dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
                max_tx_rate, vf);
-       return status;
+       return be_cmd_status(status);
 }
 static int be_set_vf_link_state(struct net_device *netdev, int vf,
                                int link_state)
@@ -1423,10 +1424,15 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
                return -EINVAL;
 
        status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
-       if (!status)
-               adapter->vf_cfg[vf].plink_tracking = link_state;
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "Link state change on VF %d failed: %#x\n", vf, status);
+               return be_cmd_status(status);
+       }
 
-       return status;
+       adapter->vf_cfg[vf].plink_tracking = link_state;
+
+       return 0;
 }
 
 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
@@ -2028,7 +2034,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
         */
        for (;;) {
                rxcp = be_rx_compl_get(rxo);
-               if (rxcp == NULL) {
+               if (!rxcp) {
                        if (lancer_chip(adapter))
                                break;
 
@@ -2935,8 +2941,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_KERNEL);
-       if (cmd.va == NULL)
-               return -1;
+       if (!cmd.va)
+               return -ENOMEM;
 
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
@@ -3043,6 +3049,7 @@ static void be_vf_clear(struct be_adapter *adapter)
 done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
+       adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
 }
 
 static void be_clear_queues(struct be_adapter *adapter)
@@ -3098,6 +3105,13 @@ static int be_clear(struct be_adapter *adapter)
        if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
+       /* Re-configure FW to distribute resources evenly across max-supported
+        * number of VFs, only when VFs are not already enabled.
+        */
+       if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+               be_cmd_set_sriov_config(adapter, adapter->pool_res,
+                                       pci_sriov_get_totalvfs(adapter->pdev));
+
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
@@ -3170,19 +3184,6 @@ static int be_vf_setup(struct be_adapter *adapter)
        u32 privileges;
 
        old_vfs = pci_num_vf(adapter->pdev);
-       if (old_vfs) {
-               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
-               if (old_vfs != num_vfs)
-                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
-               adapter->num_vfs = old_vfs;
-       } else {
-               if (num_vfs > be_max_vfs(adapter))
-                       dev_info(dev, "Device supports %d VFs and not %d\n",
-                                be_max_vfs(adapter), num_vfs);
-               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
-               if (!adapter->num_vfs)
-                       return 0;
-       }
 
        status = be_vf_setup_init(adapter);
        if (status)
@@ -3194,17 +3195,15 @@ static int be_vf_setup(struct be_adapter *adapter)
                        if (status)
                                goto err;
                }
-       } else {
-               status = be_vfs_if_create(adapter);
-               if (status)
-                       goto err;
-       }
 
-       if (old_vfs) {
                status = be_vfs_mac_query(adapter);
                if (status)
                        goto err;
        } else {
+               status = be_vfs_if_create(adapter);
+               if (status)
+                       goto err;
+
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
@@ -3243,6 +3242,8 @@ static int be_vf_setup(struct be_adapter *adapter)
                        goto err;
                }
        }
+
+       adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
        return 0;
 err:
        dev_err(dev, "VF setup failed\n");
@@ -3270,19 +3271,7 @@ static u8 be_convert_mc_type(u32 function_mode)
 static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
 {
-       struct pci_dev *pdev = adapter->pdev;
-       bool use_sriov = false;
-       int max_vfs = 0;
-
-       if (be_physfn(adapter) && BE3_chip(adapter)) {
-               be_cmd_get_profile_config(adapter, res, 0);
-               /* Some old versions of BE3 FW don't report max_vfs value */
-               if (res->max_vfs == 0) {
-                       max_vfs = pci_sriov_get_totalvfs(pdev);
-                       res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               }
-               use_sriov = res->max_vfs && sriov_want(adapter);
-       }
+       bool use_sriov = adapter->num_vfs ? 1 : 0;
 
        if (be_physfn(adapter))
                res->max_uc_mac = BE_UC_PMAC_COUNT;
@@ -3326,7 +3315,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
        res->max_rx_qs = res->max_rss_qs + 1;
 
        if (be_physfn(adapter))
-               res->max_evt_qs = (res->max_vfs > 0) ?
+               res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
                                        BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
        else
                res->max_evt_qs = 1;
@@ -3349,6 +3338,54 @@ static void be_setup_init(struct be_adapter *adapter)
                adapter->cmd_privileges = MIN_PRIVILEGES;
 }
 
+static int be_get_sriov_config(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct be_resources res = {0};
+       int status, max_vfs, old_vfs;
+
+       status = be_cmd_get_profile_config(adapter, &res, 0);
+       if (status)
+               return status;
+
+       adapter->pool_res = res;
+
+       /* Some old versions of BE3 FW don't report max_vfs value */
+       if (BE3_chip(adapter) && !res.max_vfs) {
+               max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
+               res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+       }
+
+       adapter->pool_res.max_vfs = res.max_vfs;
+       pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+       if (!be_max_vfs(adapter)) {
+               if (num_vfs)
+                       dev_warn(dev, "device doesn't support SRIOV\n");
+               adapter->num_vfs = 0;
+               return 0;
+       }
+
+       /* validate num_vfs module param */
+       old_vfs = pci_num_vf(adapter->pdev);
+       if (old_vfs) {
+               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+               if (old_vfs != num_vfs)
+                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+               adapter->num_vfs = old_vfs;
+       } else {
+               if (num_vfs > be_max_vfs(adapter)) {
+                       dev_info(dev, "Resources unavailable to init %d VFs\n",
+                                num_vfs);
+                       dev_info(dev, "Limiting to %d VFs\n",
+                                be_max_vfs(adapter));
+               }
+               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
+       }
+
+       return 0;
+}
+
 static int be_get_resources(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
@@ -3374,13 +3411,6 @@ static int be_get_resources(struct be_adapter *adapter)
                        res.max_evt_qs /= 2;
                adapter->res = res;
 
-               if (be_physfn(adapter)) {
-                       status = be_cmd_get_profile_config(adapter, &res, 0);
-                       if (status)
-                               return status;
-                       adapter->res.max_vfs = res.max_vfs;
-               }
-
                dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                         be_max_txqs(adapter), be_max_rxqs(adapter),
                         be_max_rss(adapter), be_max_eqs(adapter),
@@ -3393,16 +3423,12 @@ static int be_get_resources(struct be_adapter *adapter)
        return 0;
 }
 
-/* Routine to query per function resource limits */
 static int be_get_config(struct be_adapter *adapter)
 {
        u16 profile_id;
        int status;
 
-       status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
-                                    &adapter->function_mode,
-                                    &adapter->function_caps,
-                                    &adapter->asic_rev);
+       status = be_cmd_query_fw_cfg(adapter);
        if (status)
                return status;
 
@@ -3413,6 +3439,28 @@ static int be_get_config(struct be_adapter *adapter)
                                 "Using profile 0x%x\n", profile_id);
        }
 
+       if (!BE2_chip(adapter) && be_physfn(adapter)) {
+               status = be_get_sriov_config(adapter);
+               if (status)
+                       return status;
+
+               /* When the HW is in SRIOV capable configuration, the PF-pool
+                * resources are equally distributed across the max-number of
+                * VFs. The user may request only a subset of the max-vfs to be
+                * enabled. Based on num_vfs, redistribute the resources across
+                * num_vfs so that each VF will have access to more number of
+                * resources. This facility is not available in BE3 FW.
+                * Also, this is done by FW in Lancer chip.
+                */
+               if (!pci_num_vf(adapter->pdev)) {
+                       status = be_cmd_set_sriov_config(adapter,
+                                                        adapter->pool_res,
+                                                        adapter->num_vfs);
+                       if (status)
+                               return status;
+               }
+       }
+
        status = be_get_resources(adapter);
        if (status)
                return status;
@@ -3571,7 +3619,7 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
+       be_cmd_get_fw_ver(adapter);
 
        if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
                dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
@@ -3596,12 +3644,8 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_logical_link_config(adapter,
                                               IFLA_VF_LINK_STATE_AUTO, 0);
 
-       if (sriov_want(adapter)) {
-               if (be_max_vfs(adapter))
-                       be_vf_setup(adapter);
-               else
-                       dev_warn(dev, "device doesn't support SRIOV\n");
-       }
+       if (adapter->num_vfs)
+               be_vf_setup(adapter);
 
        status = be_cmd_get_phy_info(adapter);
        if (!status && be_pause_supported(adapter))
@@ -3925,7 +3969,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
-               return -1;
+               return -EINVAL;
        }
 
        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
@@ -4094,7 +4138,7 @@ lancer_fw_exit:
 static int be_get_ufi_type(struct be_adapter *adapter,
                           struct flash_file_hdr_g3 *fhdr)
 {
-       if (fhdr == NULL)
+       if (!fhdr)
                goto be_get_ufi_exit;
 
        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
@@ -4156,7 +4200,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
                                                              &flash_cmd,
                                                              num_imgs);
                                else {
-                                       status = -1;
+                                       status = -EINVAL;
                                        dev_err(&adapter->pdev->dev,
                                                "Can't load BE3 UFI on BE3R\n");
                                }
@@ -4167,7 +4211,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
        if (ufi_type == UFI_TYPE2)
                status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
        else if (ufi_type == -1)
-               status = -1;
+               status = -EINVAL;
 
        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
@@ -4190,7 +4234,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
-               return -1;
+               return -ENETDOWN;
        }
 
        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -4205,8 +4249,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
                status = be_fw_download(adapter, fw);
 
        if (!status)
-               be_cmd_get_fw_ver(adapter, adapter->fw_ver,
-                                 adapter->fw_on_flash);
+               be_cmd_get_fw_ver(adapter);
 
 fw_exit:
        release_firmware(fw);
@@ -4437,12 +4480,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 
        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
-               if (adapter->csr == NULL)
+               if (!adapter->csr)
                        return -ENOMEM;
        }
 
        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
-       if (addr == NULL)
+       if (!addr)
                goto pci_map_err;
        adapter->db = addr;
 
@@ -4505,7 +4548,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
        rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
                                            rx_filter->size, &rx_filter->dma,
                                            GFP_KERNEL);
-       if (rx_filter->va == NULL) {
+       if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }
@@ -4554,8 +4597,8 @@ static int be_stats_init(struct be_adapter *adapter)
 
        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
-       if (cmd->va == NULL)
-               return -1;
+       if (!cmd->va)
+               return -ENOMEM;
        return 0;
 }
 
@@ -4776,7 +4819,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        pci_set_master(pdev);
 
        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
-       if (netdev == NULL) {
+       if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
index 671d080105a7e08c5e20456a5ad38b29f6704e19..bd53caf1c1eb6b735be414aecd11a4e3ff732747 100644 (file)
@@ -256,12 +256,6 @@ struct bufdesc_ex {
 #define FLAG_RX_CSUM_ENABLED   (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR     (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
-struct fec_enet_delayed_work {
-       struct delayed_work delay_work;
-       bool timeout;
-       bool trig_tx;
-};
-
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
  * cur_rx and cur_tx point to the currently available buffer.
@@ -308,7 +302,6 @@ struct fec_enet_private {
 
        struct  platform_device *pdev;
 
-       int     opened;
        int     dev_id;
 
        /* Phylib and MDIO interface */
@@ -328,6 +321,8 @@ struct fec_enet_private {
        struct  napi_struct napi;
        int     csum_flags;
 
+       struct work_struct tx_timeout_work;
+
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        unsigned long last_overflow_check;
@@ -340,7 +335,6 @@ struct fec_enet_private {
        int hwts_rx_en;
        int hwts_tx_en;
        struct timer_list time_keep;
-       struct fec_enet_delayed_work delay_work;
        struct regulator *reg_phy;
 };
 
index 77037fd377b85dcda23bddc3c043b8d11e9d8cb3..e0efb212223f83530c248ff33b4910a79e3df243 100644 (file)
@@ -320,6 +320,27 @@ static void *swap_buffer(void *bufaddr, int len)
        return bufaddr;
 }
 
+static void fec_dump(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       struct bufdesc *bdp = fep->tx_bd_base;
+       unsigned int index = 0;
+
+       netdev_info(ndev, "TX ring dump\n");
+       pr_info("Nr     SC     addr       len  SKB\n");
+
+       do {
+               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+                       index,
+                       bdp == fep->cur_tx ? 'S' : ' ',
+                       bdp == fep->dirty_tx ? 'H' : ' ',
+                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+                       fep->tx_skbuff[index]);
+               bdp = fec_enet_get_nextdesc(bdp, fep);
+               index++;
+       } while (bdp != fep->tx_bd_base);
+}
+
 static inline bool is_ipv4_pkt(struct sk_buff *skb)
 {
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
@@ -342,22 +363,6 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
        return 0;
 }
 
-static void
-fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
-{
-       const struct platform_device_id *id_entry =
-                               platform_get_device_id(fep->pdev);
-       struct bufdesc *bdp_pre;
-
-       bdp_pre = fec_enet_get_prevdesc(bdp, fep);
-       if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-           !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-               fep->delay_work.trig_tx = true;
-               schedule_delayed_work(&(fep->delay_work.delay_work),
-                                       msecs_to_jiffies(1));
-       }
-}
-
 static int
 fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -373,6 +378,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
        skb_frag_t *this_frag;
        unsigned int index;
        void *bufaddr;
+       dma_addr_t addr;
        int i;
 
        for (frag = 0; frag < nr_frags; frag++) {
@@ -415,15 +421,16 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
                                swap_buffer(bufaddr, frag_len);
                }
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                               frag_len, DMA_TO_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+               addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+                                     DMA_TO_DEVICE);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
                        dev_kfree_skb_any(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        goto dma_mapping_error;
                }
 
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_datlen = frag_len;
                bdp->cbd_sc = status;
        }
@@ -450,6 +457,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        int nr_frags = skb_shinfo(skb)->nr_frags;
        struct bufdesc *bdp, *last_bdp;
        void *bufaddr;
+       dma_addr_t addr;
        unsigned short status;
        unsigned short buflen;
        unsigned int estatus = 0;
@@ -490,12 +498,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
                        swap_buffer(bufaddr, buflen);
        }
 
-       /* Push the data cache so the CPM does not get stale memory
-        * data.
-        */
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                       buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       /* Push the data cache so the CPM does not get stale memory data. */
+       addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
@@ -537,6 +542,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        fep->tx_skbuff[index] = skb;
 
        bdp->cbd_datlen = buflen;
+       bdp->cbd_bufaddr = addr;
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
@@ -544,8 +550,6 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
        bdp->cbd_sc = status;
 
-       fec_enet_submit_work(bdp, fep);
-
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, fep);
 
@@ -570,12 +574,12 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
        unsigned short status;
        unsigned int estatus = 0;
+       dma_addr_t addr;
 
        status = bdp->cbd_sc;
        status &= ~BD_ENET_TX_STATS;
 
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-       bdp->cbd_datlen = size;
 
        if (((unsigned long) data) & FEC_ALIGNMENT ||
                id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
@@ -586,15 +590,17 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
                        swap_buffer(data, size);
        }
 
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
-                                       size, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_BUSY;
        }
 
+       bdp->cbd_datlen = size;
+       bdp->cbd_bufaddr = addr;
+
        if (fep->bufdesc_ex) {
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -732,8 +738,6 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
        /* Save skb pointer */
        fep->tx_skbuff[index] = skb;
 
-       fec_enet_submit_work(bdp, fep);
-
        skb_tx_timestamp(skb);
        fep->cur_tx = bdp;
 
@@ -801,7 +805,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                /* Initialize the BD for every fragment in the page. */
                bdp->cbd_sc = 0;
-               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+               if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
                        fep->tx_skbuff[i] = NULL;
                }
@@ -815,12 +819,13 @@ static void fec_enet_bd_init(struct net_device *dev)
        fep->dirty_tx = bdp;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
+/*
+ * This function is called to start or restart the FEC during a link
+ * change, transmit timeout, or to reconfigure the FEC.  The network
+ * packet processing for this device must be stopped before this call.
  */
 static void
-fec_restart(struct net_device *ndev, int duplex)
+fec_restart(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
@@ -831,13 +836,6 @@ fec_restart(struct net_device *ndev, int duplex)
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */
 
-       if (netif_running(ndev)) {
-               netif_device_detach(ndev);
-               napi_disable(&fep->napi);
-               netif_stop_queue(ndev);
-               netif_tx_lock_bh(ndev);
-       }
-
        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);
@@ -878,7 +876,7 @@ fec_restart(struct net_device *ndev, int duplex)
        }
 
        /* Enable MII mode */
-       if (duplex) {
+       if (fep->full_duplex == DUPLEX_FULL) {
                /* FD enable */
                writel(0x04, fep->hwp + FEC_X_CNTRL);
        } else {
@@ -887,8 +885,6 @@ fec_restart(struct net_device *ndev, int duplex)
                writel(0x0, fep->hwp + FEC_X_CNTRL);
        }
 
-       fep->full_duplex = duplex;
-
        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
@@ -1006,13 +1002,6 @@ fec_restart(struct net_device *ndev, int duplex)
 
        /* Enable interrupts we wish to service */
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-
-       if (netif_running(ndev)) {
-               netif_tx_unlock_bh(ndev);
-               netif_wake_queue(ndev);
-               napi_enable(&fep->napi);
-               netif_device_attach(ndev);
-       }
 }
 
 static void
@@ -1050,29 +1039,44 @@ fec_timeout(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       fec_dump(ndev);
+
        ndev->stats.tx_errors++;
 
-       fep->delay_work.timeout = true;
-       schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+       schedule_work(&fep->tx_timeout_work);
 }
 
-static void fec_enet_work(struct work_struct *work)
+static void fec_enet_timeout_work(struct work_struct *work)
 {
        struct fec_enet_private *fep =
-               container_of(work,
-                            struct fec_enet_private,
-                            delay_work.delay_work.work);
+               container_of(work, struct fec_enet_private, tx_timeout_work);
+       struct net_device *ndev = fep->netdev;
 
-       if (fep->delay_work.timeout) {
-               fep->delay_work.timeout = false;
-               fec_restart(fep->netdev, fep->full_duplex);
-               netif_wake_queue(fep->netdev);
+       rtnl_lock();
+       if (netif_device_present(ndev) || netif_running(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
+               fec_restart(ndev);
+               netif_wake_queue(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
        }
+       rtnl_unlock();
+}
 
-       if (fep->delay_work.trig_tx) {
-               fep->delay_work.trig_tx = false;
-               writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-       }
+static void
+fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
+       struct skb_shared_hwtstamps *hwtstamps)
+{
+       unsigned long flags;
+       u64 ns;
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       ns = timecounter_cyc2time(&fep->tc, ts);
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+       memset(hwtstamps, 0, sizeof(*hwtstamps));
+       hwtstamps->hwtstamp = ns_to_ktime(ns);
 }
 
 static void
@@ -1100,6 +1104,7 @@ fec_enet_tx(struct net_device *ndev)
                index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
                skb = fep->tx_skbuff[index];
+               fep->tx_skbuff[index] = NULL;
                if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        bdp->cbd_datlen, DMA_TO_DEVICE);
@@ -1132,20 +1137,12 @@ fec_enet_tx(struct net_device *ndev)
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                        fep->bufdesc_ex) {
                        struct skb_shared_hwtstamps shhwtstamps;
-                       unsigned long flags;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-                       spin_lock_irqsave(&fep->tmreg_lock, flags);
-                       shhwtstamps.hwtstamp = ns_to_ktime(
-                               timecounter_cyc2time(&fep->tc, ebdp->ts));
-                       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }
 
-               if (status & BD_ENET_TX_READY)
-                       netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
-
                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
@@ -1154,7 +1151,6 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[index] = NULL;
 
                fep->dirty_tx = bdp;
 
@@ -1169,7 +1165,10 @@ fec_enet_tx(struct net_device *ndev)
                                netif_wake_queue(ndev);
                }
        }
-       return;
+
+       /* ERR006538: Keep the transmitter going */
+       if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
+               writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 }
 
 /* During a receive, the cur_rx points to the current incoming buffer.
@@ -1215,8 +1214,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
                if ((status & BD_ENET_RX_LAST) == 0)
                        netdev_err(ndev, "rcv is not +last\n");
 
-               if (!fep->opened)
-                       goto rx_processing_done;
+               writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
 
                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1300,18 +1298,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
                        skb->protocol = eth_type_trans(skb, ndev);
 
                        /* Get receive timestamp from the skb */
-                       if (fep->hwts_rx_en && fep->bufdesc_ex) {
-                               struct skb_shared_hwtstamps *shhwtstamps =
-                                                           skb_hwtstamps(skb);
-                               unsigned long flags;
-
-                               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
-                               spin_lock_irqsave(&fep->tmreg_lock, flags);
-                               shhwtstamps->hwtstamp = ns_to_ktime(
-                                   timecounter_cyc2time(&fep->tc, ebdp->ts));
-                               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-                       }
+                       if (fep->hwts_rx_en && fep->bufdesc_ex)
+                               fec_enet_hwtstamp(fep, ebdp->ts,
+                                                 skb_hwtstamps(skb));
 
                        if (fep->bufdesc_ex &&
                            (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
@@ -1369,29 +1358,25 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
        uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       do {
-               int_events = readl(fep->hwp + FEC_IEVENT);
-               writel(int_events, fep->hwp + FEC_IEVENT);
+       int_events = readl(fep->hwp + FEC_IEVENT);
+       writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
 
-               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
-                       ret = IRQ_HANDLED;
+       if (int_events & napi_mask) {
+               ret = IRQ_HANDLED;
 
-                       /* Disable the RX interrupt */
-                       if (napi_schedule_prep(&fep->napi)) {
-                               writel(FEC_RX_DISABLED_IMASK,
-                                       fep->hwp + FEC_IMASK);
-                               __napi_schedule(&fep->napi);
-                       }
-               }
+               /* Disable the NAPI interrupts */
+               writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+               napi_schedule(&fep->napi);
+       }
 
-               if (int_events & FEC_ENET_MII) {
-                       ret = IRQ_HANDLED;
-                       complete(&fep->mdio_done);
-               }
-       } while (int_events);
+       if (int_events & FEC_ENET_MII) {
+               ret = IRQ_HANDLED;
+               complete(&fep->mdio_done);
+       }
 
        return ret;
 }
@@ -1399,8 +1384,16 @@ fec_enet_interrupt(int irq, void *dev_id)
 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
-       int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int pkts;
+
+       /*
+        * Clear any pending transmit or receive interrupts before
+        * processing the rings to avoid racing with the hardware.
+        */
+       writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
+
+       pkts = fec_enet_rx(ndev, budget);
 
        fec_enet_tx(ndev);
 
@@ -1498,14 +1491,23 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                return;
        }
 
-       if (phy_dev->link) {
+       /*
+        * If the netdev is down, or is going down, we're not interested
+        * in link state events, so just mark our idea of the link as down
+        * and ignore the event.
+        */
+       if (!netif_running(ndev) || !netif_device_present(ndev)) {
+               fep->link = 0;
+       } else if (phy_dev->link) {
                if (!fep->link) {
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
 
-               if (fep->full_duplex != phy_dev->duplex)
+               if (fep->full_duplex != phy_dev->duplex) {
+                       fep->full_duplex = phy_dev->duplex;
                        status_change = 1;
+               }
 
                if (phy_dev->speed != fep->speed) {
                        fep->speed = phy_dev->speed;
@@ -1513,11 +1515,21 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                }
 
                /* if any of the above changed restart the FEC */
-               if (status_change)
-                       fec_restart(ndev, phy_dev->duplex);
+               if (status_change) {
+                       napi_disable(&fep->napi);
+                       netif_tx_lock_bh(ndev);
+                       fec_restart(ndev);
+                       netif_wake_queue(ndev);
+                       netif_tx_unlock_bh(ndev);
+                       napi_enable(&fep->napi);
+               }
        } else {
                if (fep->link) {
+                       napi_disable(&fep->napi);
+                       netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
+                       netif_tx_unlock_bh(ndev);
+                       napi_enable(&fep->napi);
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
@@ -1667,6 +1679,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        /* mask with MAC supported features */
        if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
                phy_dev->supported &= PHY_GBIT_FEATURES;
+               phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
 #if !defined(CONFIG_M5272)
                phy_dev->supported |= SUPPORTED_Pause;
 #endif
@@ -1870,6 +1883,9 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       if (!fep->phy_dev)
+               return -ENODEV;
+
        if (pause->tx_pause != pause->rx_pause) {
                netdev_info(ndev,
                        "hardware only support enable/disable both tx and rx");
@@ -1895,8 +1911,14 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                        fec_stop(ndev);
                phy_start_aneg(fep->phy_dev);
        }
-       if (netif_running(ndev))
-               fec_restart(ndev, 0);
+       if (netif_running(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
+               fec_restart(ndev);
+               netif_wake_queue(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
+       }
 
        return 0;
 }
@@ -2013,21 +2035,19 @@ static int fec_enet_nway_reset(struct net_device *dev)
 }
 
 static const struct ethtool_ops fec_enet_ethtool_ops = {
-#if !defined(CONFIG_M5272)
-       .get_pauseparam         = fec_enet_get_pauseparam,
-       .set_pauseparam         = fec_enet_set_pauseparam,
-#endif
        .get_settings           = fec_enet_get_settings,
        .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
-       .get_link               = ethtool_op_get_link,
-       .get_ts_info            = fec_enet_get_ts_info,
        .nway_reset             = fec_enet_nway_reset,
+       .get_link               = ethtool_op_get_link,
 #ifndef CONFIG_M5272
-       .get_ethtool_stats      = fec_enet_get_ethtool_stats,
+       .get_pauseparam         = fec_enet_get_pauseparam,
+       .set_pauseparam         = fec_enet_set_pauseparam,
        .get_strings            = fec_enet_get_strings,
+       .get_ethtool_stats      = fec_enet_get_ethtool_stats,
        .get_sset_count         = fec_enet_get_sset_count,
 #endif
+       .get_ts_info            = fec_enet_get_ts_info,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2061,18 +2081,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
                skb = fep->rx_skbuff[i];
-
-               if (bdp->cbd_bufaddr)
+               fep->rx_skbuff[i] = NULL;
+               if (skb) {
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (skb)
                        dev_kfree_skb(skb);
+               }
                bdp = fec_enet_get_nextdesc(bdp, fep);
        }
 
        bdp = fep->tx_bd_base;
-       for (i = 0; i < fep->tx_ring_size; i++)
+       for (i = 0; i < fep->tx_ring_size; i++) {
                kfree(fep->tx_bounce[i]);
+               fep->tx_bounce[i] = NULL;
+               skb = fep->tx_skbuff[i];
+               fep->tx_skbuff[i] = NULL;
+               dev_kfree_skb(skb);
+       }
 }
 
 static int fec_enet_alloc_buffers(struct net_device *ndev)
@@ -2084,21 +2109,23 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
+               dma_addr_t addr;
+
                skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-               if (!skb) {
-                       fec_enet_free_buffers(ndev);
-                       return -ENOMEM;
-               }
-               fep->rx_skbuff[i] = skb;
+               if (!skb)
+                       goto err_alloc;
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+               addr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-                       fec_enet_free_buffers(ndev);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
+                       dev_kfree_skb(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Rx DMA memory map failed\n");
-                       return -ENOMEM;
+                       goto err_alloc;
                }
+
+               fep->rx_skbuff[i] = skb;
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
                if (fep->bufdesc_ex) {
@@ -2116,6 +2143,8 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp = fep->tx_bd_base;
        for (i = 0; i < fep->tx_ring_size; i++) {
                fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+               if (!fep->tx_bounce[i])
+                       goto err_alloc;
 
                bdp->cbd_sc = 0;
                bdp->cbd_bufaddr = 0;
@@ -2133,6 +2162,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp->cbd_sc |= BD_SC_WRAP;
 
        return 0;
+
+ err_alloc:
+       fec_enet_free_buffers(ndev);
+       return -ENOMEM;
 }
 
 static int
@@ -2161,10 +2194,10 @@ fec_enet_open(struct net_device *ndev)
                return ret;
        }
 
+       fec_restart(ndev);
        napi_enable(&fep->napi);
        phy_start(fep->phy_dev);
        netif_start_queue(ndev);
-       fep->opened = 1;
        return 0;
 }
 
@@ -2173,17 +2206,17 @@ fec_enet_close(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       /* Don't know what to do yet. */
-       napi_disable(&fep->napi);
-       fep->opened = 0;
-       netif_stop_queue(ndev);
-       fec_stop(ndev);
+       phy_stop(fep->phy_dev);
 
-       if (fep->phy_dev) {
-               phy_stop(fep->phy_dev);
-               phy_disconnect(fep->phy_dev);
+       if (netif_device_present(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_disable(ndev);
+               fec_stop(ndev);
        }
 
+       phy_disconnect(fep->phy_dev);
+       fep->phy_dev = NULL;
+
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        fec_enet_free_buffers(ndev);
@@ -2310,12 +2343,21 @@ static void fec_poll_controller(struct net_device *dev)
 }
 #endif
 
+#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
+
 static int fec_set_features(struct net_device *netdev,
        netdev_features_t features)
 {
        struct fec_enet_private *fep = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;
 
+       /* Quiesce the device if necessary */
+       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(netdev);
+               fec_stop(netdev);
+       }
+
        netdev->features = features;
 
        /* Receive checksum has been changed */
@@ -2324,14 +2366,14 @@ static int fec_set_features(struct net_device *netdev,
                        fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
                else
                        fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+       }
 
-               if (netif_running(netdev)) {
-                       fec_stop(netdev);
-                       fec_restart(netdev, fep->phy_dev->duplex);
-                       netif_wake_queue(netdev);
-               } else {
-                       fec_restart(netdev, fep->phy_dev->duplex);
-               }
+       /* Resume the device after updates */
+       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+               fec_restart(netdev);
+               netif_wake_queue(netdev);
+               netif_tx_unlock_bh(netdev);
+               napi_enable(&fep->napi);
        }
 
        return 0;
@@ -2432,7 +2474,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        ndev->hw_features = ndev->features;
 
-       fec_restart(ndev, 0);
+       fec_restart(ndev);
 
        return 0;
 }
@@ -2615,7 +2657,7 @@ fec_probe(struct platform_device *pdev)
        if (fep->bufdesc_ex && fep->ptp_clock)
                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
-       INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
+       INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
        return 0;
 
 failed_register:
@@ -2640,7 +2682,7 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       cancel_delayed_work_sync(&(fep->delay_work.delay_work));
+       cancel_work_sync(&fep->tx_timeout_work);
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);
@@ -2661,10 +2703,17 @@ fec_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       rtnl_lock();
        if (netif_running(ndev)) {
-               fec_stop(ndev);
+               phy_stop(fep->phy_dev);
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
                netif_device_detach(ndev);
+               netif_tx_unlock_bh(ndev);
+               fec_stop(ndev);
        }
+       rtnl_unlock();
+
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 
@@ -2692,10 +2741,16 @@ fec_resume(struct device *dev)
        if (ret)
                goto failed_clk;
 
+       rtnl_lock();
        if (netif_running(ndev)) {
-               fec_restart(ndev, fep->full_duplex);
+               fec_restart(ndev);
+               netif_tx_lock_bh(ndev);
                netif_device_attach(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
+               phy_start(fep->phy_dev);
        }
+       rtnl_unlock();
 
        return 0;
 
index 36fc429298e353191cc93fa10140e298bc7cff85..8ceaf7a2660c8f3f38dc27ce71701a3290ce14d2 100644 (file)
@@ -2396,7 +2396,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
                if (netif_msg_ifup(ugeth))
                        pr_err("Bad number of Rx threads value\n");
                return -EINVAL;
-               break;
        }
 
        switch (ug_info->numThreadsTx) {
@@ -2419,7 +2418,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
                if (netif_msg_ifup(ugeth))
                        pr_err("Bad number of Tx threads value\n");
                return -EINVAL;
-               break;
        }
 
        /* Calculate rx_extended_features */
index d50f78afb56d8715d8b90d936216d22bacaaa70e..cca5bca44e737dd6fdd23e758bcffb8e2ce119e7 100644 (file)
@@ -1316,7 +1316,6 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
        case e1000_82547:
        case e1000_82547_rev_2:
                return e1000_integrated_phy_loopback(adapter);
-               break;
        default:
                /* Default PHY loopback work is to read the MII
                 * control register and assert bit 14 (loopback mode).
@@ -1325,7 +1324,6 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
                phy_reg |= MII_CR_LOOPBACK;
                e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
                return 0;
-               break;
        }
 
        return 8;
@@ -1344,7 +1342,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
                case e1000_82545_rev_3:
                case e1000_82546_rev_3:
                        return e1000_set_phy_loopback(adapter);
-                       break;
                default:
                        rctl = er32(RCTL);
                        rctl |= E1000_RCTL_LBM_TCVR;
index e9b07ccc0ebaebc0b1b0993625485cae93b0b447..1acf5034db10b3411507023a7b25abd6011a1836 100644 (file)
@@ -902,7 +902,6 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
        default:
                e_dbg("Flow control param set incorrectly\n");
                return -E1000_ERR_CONFIG;
-               break;
        }
 
        /* Since auto-negotiation is enabled, take the link out of reset (the
@@ -5041,7 +5040,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
                        break;
                default:
                        return -E1000_ERR_PHY;
-                       break;
                }
        } else if (hw->phy_type == e1000_phy_igp) {     /* For IGP PHY */
                u16 cur_agc_value;
index 218481e509f99f4e32deba736ae4117365c650e3..dc79ed85030b73e8ccbbdc57c05ebe09085a4bee 100644 (file)
@@ -95,7 +95,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        /* This can only be done after all function pointers are setup. */
@@ -422,7 +421,6 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
                break;
        case e1000_82573:
                return e1000e_get_phy_id(hw);
-               break;
        case e1000_82574:
        case e1000_82583:
                ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
@@ -440,7 +438,6 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        return 0;
@@ -1458,7 +1455,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        if (ret_val)
index 815e26c6d34b85a54c554de5863e200302d6eacd..865ce45f9ec3424733dd3db43d9e635183575604 100644 (file)
@@ -1521,11 +1521,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
                switch (hw->mac.type) {
                case e1000_80003es2lan:
                        return e1000_set_es2lan_mac_loopback(adapter);
-                       break;
                case e1000_82571:
                case e1000_82572:
                        return e1000_set_82571_fiber_loopback(adapter);
-                       break;
                default:
                        rctl = er32(RCTL);
                        rctl |= E1000_RCTL_LBM_TCVR;
index 8894ab8ed6bd82de2c0f720939d31a906096a12c..f236861c2a3194a7e4a88ee57085761f6234ee84 100644 (file)
@@ -572,7 +572,6 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        return 0;
index 8c386f3a15ebb6649e347f5316555827b151819b..30b74d590bee461663291d803bbfd06e042ba255 100644 (file)
@@ -787,7 +787,6 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
        default:
                e_dbg("Flow control param set incorrectly\n");
                return -E1000_ERR_CONFIG;
-               break;
        }
 
        ew32(TXCW, txcw);
index 65985846345deea3cc16d6f3cfd26add64dbfa7f..29cd81ae29f43e0abd801084b7f10b0f7d6a8871 100644 (file)
@@ -84,6 +84,7 @@
 #define I40E_AQ_WORK_LIMIT            16
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -133,7 +134,9 @@ enum i40e_state_t {
        __I40E_EMP_RESET_REQUESTED,
        __I40E_FILTER_OVERFLOW_PROMISC,
        __I40E_SUSPENDED,
+       __I40E_PTP_TX_IN_PROGRESS,
        __I40E_BAD_EEPROM,
+       __I40E_DOWN_REQUESTED,
 };
 
 enum i40e_interrupt_policy {
@@ -152,7 +155,7 @@ struct i40e_lump_tracking {
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
-#define I40E_FDIR_BUFFER_HEAD_ROOM     200
+#define I40E_FDIR_BUFFER_HEAD_ROOM     32
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -277,6 +280,7 @@ struct i40e_pf {
 #ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
 #endif
+#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
 
        /* tracks features that get auto disabled by errors */
@@ -348,6 +352,7 @@ struct i40e_pf {
        u32 rx_hwtstamp_cleared;
        bool ptp_tx;
        bool ptp_rx;
+       u16 rss_table_size;
 };
 
 struct i40e_mac_filter {
@@ -359,6 +364,7 @@ struct i40e_mac_filter {
        bool is_vf;             /* filter belongs to a VF */
        bool is_netdev;         /* filter belongs to a netdev */
        bool changed;           /* filter needs to be sync'd to the HW */
+       bool is_laa;            /* filter is a Locally Administered Address */
 };
 
 struct i40e_veb {
@@ -578,6 +584,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
 int i40e_get_current_fd_count(struct i40e_pf *pf);
+int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -614,6 +621,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
index 7a027499fc57fc968d9ca33df5c65bb9e6825123..0e551f281d594d78cdabb61c1f681cc5a559e514 100644 (file)
@@ -55,16 +55,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
+               hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+               hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
+               hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+               hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
+               hw->aq.asq.bal  = I40E_PF_ATQBAL;
+               hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
+               hw->aq.arq.bal  = I40E_PF_ARQBAL;
+               hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
 }
 
@@ -296,27 +304,18 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the transmit queue */
-               wr32(hw, I40E_VF_ATQBAH1,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQBAL1,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-                                         I40E_VF_ATQLEN1_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ATQBAL1);
-       } else {
-               /* configure the transmit queue */
-               wr32(hw, I40E_PF_ATQBAH,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQBAL,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-                                         I40E_PF_ATQLEN_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ATQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+                                 I40E_PF_ATQLEN_ATQENABLE_MASK));
+       wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+       wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -334,30 +333,21 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the receive queue */
-               wr32(hw, I40E_VF_ARQBAH1,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQBAL1,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-                                         I40E_VF_ARQLEN1_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ARQBAL1);
-       } else {
-               /* configure the receive queue */
-               wr32(hw, I40E_PF_ARQBAH,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQBAL,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-                                         I40E_PF_ARQLEN_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ARQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+                                 I40E_PF_ARQLEN_ARQENABLE_MASK));
+       wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+       wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -499,6 +489,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
+       wr32(hw, hw->aq.asq.bal, 0);
+       wr32(hw, hw->aq.asq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.asq_mutex);
@@ -530,6 +522,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
+       wr32(hw, hw->aq.arq.bal, 0);
+       wr32(hw, hw->aq.arq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.arq_mutex);
@@ -577,6 +571,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        /* Set up register offsets */
        i40e_adminq_init_regs(hw);
 
+       /* setup ASQ command write back timeout */
+       hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
@@ -677,6 +674,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "%s: ntc %d head %d.\n", __func__, ntc,
+                          rd32(hw, hw->aq.asq.head));
+
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
@@ -736,6 +737,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
+       u32  val = 0;
+
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: head overrun at %d\n", val);
+               status = I40E_ERR_QUEUE_EMPTY;
+               goto asq_send_command_exit;
+       }
 
        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -829,6 +839,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        }
 
        /* bump the tail */
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -852,7 +863,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                        /* ugh! delay while spin_lock */
                        udelay(delay_len);
                        total_delay += delay_len;
-               } while (total_delay <  I40E_ASQ_CMD_TIMEOUT);
+               } while (total_delay < hw->aq.asq_cmd_timeout);
        }
 
        /* if ready, copy the desc back to temp */
@@ -866,6 +877,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);
+
                        /* strip off FW internal code */
                        retval &= 0xff;
                }
@@ -880,6 +892,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        if (i40e_is_nvm_update_op(desc))
                hw->aq.nvm_busy = true;
 
+       if (le16_to_cpu(desc->datalen) == buff_size) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: desc and buffer writeback:\n");
+               i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+       }
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -951,10 +969,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
-       i40e_debug_aq(hw,
-                     I40E_DEBUG_AQ_COMMAND,
-                     (void *)desc,
-                     hw->aq.arq.r.arq_bi[desc_idx].va);
 
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
@@ -977,6 +991,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        if (i40e_is_nvm_update_op(&e->desc))
                hw->aq.nvm_busy = false;
 
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
index b1552fbc48a0e6876acf838ab7eadf1e72fe492e..bb76be1d38f73643fbdc78c413822d38bac441ff 100644 (file)
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
        u32 head;
        u32 tail;
        u32 len;
+       u32 bah;
+       u32 bal;
 };
 
 /* ASQ transaction details */
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
        struct i40e_adminq_ring arq;    /* receive queue */
        struct i40e_adminq_ring asq;    /* send queue */
+       u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
        u16 num_arq_entries;            /* receive queue depth */
        u16 num_asq_entries;            /* send queue depth */
        u16 arq_buf_size;               /* receive queue buffer size */
index 6e65f19dd6e58aadf1fc7df3df41ce8a617be959..c65f4e8e6cee0800c5568fed1984e3914e3231ac 100644 (file)
@@ -554,7 +554,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
                break;
        default:
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
-               break;
        }
 
        hw->phy.get_link_info = true;
@@ -654,6 +653,31 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
        return status;
 }
 
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+       if (status)
+               return status;
+
+       if (flags & I40E_AQC_PORT_ADDR_VALID)
+               memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+       else
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
+
 /**
  * i40e_pre_tx_queue_cfg - pre tx queue configure
  * @hw: pointer to the HW structure
@@ -669,8 +693,10 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
        u32 reg_block = 0;
        u32 reg_val;
 
-       if (abs_queue_idx >= 128)
+       if (abs_queue_idx >= 128) {
                reg_block = abs_queue_idx / 128;
+               abs_queue_idx %= 128;
+       }
 
        reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
        reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
@@ -810,6 +836,99 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
        return 0;
 }
 
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+       u32 num_queues, base_queue;
+       u32 num_pf_int;
+       u32 num_vf_int;
+       u32 num_vfs;
+       u32 i, j;
+       u32 val;
+       u32 eol = 0x7ff;
+
+       /* get number of interrupts, queues, and vfs */
+       val = rd32(hw, I40E_GLPCI_CNF2);
+       num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+                    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+       num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+                    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+       val = rd32(hw, I40E_PFLAN_QALLOC);
+       base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+                    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+       j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+           I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+       if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+               num_queues = (j - base_queue) + 1;
+       else
+               num_queues = 0;
+
+       val = rd32(hw, I40E_PF_VT_PFALLOC);
+       i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+           I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+       j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+           I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+       if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+               num_vfs = (j - i) + 1;
+       else
+               num_vfs = 0;
+
+       /* stop all the interrupts */
+       wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+       val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+       /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+       val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_LNKLST0, val);
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+       val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       for (i = 0; i < num_vfs; i++)
+               wr32(hw, I40E_VPINT_LNKLST0(i), val);
+       for (i = 0; i < num_vf_int - 2; i++)
+               wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+       /* warn the HW of the coming Tx disables */
+       for (i = 0; i < num_queues; i++) {
+               u32 abs_queue_idx = base_queue + i;
+               u32 reg_block = 0;
+
+               if (abs_queue_idx >= 128) {
+                       reg_block = abs_queue_idx / 128;
+                       abs_queue_idx %= 128;
+               }
+
+               val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+               val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+               val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+               val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+               wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+       }
+       udelay(400);
+
+       /* stop all the queues */
+       for (i = 0; i < num_queues; i++) {
+               wr32(hw, I40E_QINT_TQCTL(i), 0);
+               wr32(hw, I40E_QTX_ENA(i), 0);
+               wr32(hw, I40E_QINT_RQCTL(i), 0);
+               wr32(hw, I40E_QRX_ENA(i), 0);
+       }
+
+       /* short wait for all queue disables to settle */
+       udelay(50);
+}
+
 /**
  * i40e_clear_pxe_mode - clear pxe operations mode
  * @hw: pointer to the hw struct
@@ -941,6 +1060,164 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 
 /* Admin command wrappers */
 
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+       u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+       if (!abilities)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_phy_abilities);
+
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       if (abilities_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+       if (qualified_modules)
+               desc.params.external.param0 |=
+                       cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+       if (report_init)
+               desc.params.external.param0 |=
+                       cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+       status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+                                      cmd_details);
+
+       if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+               status = I40E_ERR_UNKNOWN_PHY;
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_set_phy_config *cmd =
+                       (struct i40e_aq_set_phy_config *)&desc.params.raw;
+       enum i40e_status_code status;
+
+       if (!config)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_phy_config);
+
+       *cmd = *config;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_restart)
+{
+       enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       enum i40e_status_code status;
+       u8 pause_mask = 0x0;
+
+       *aq_failures = 0x0;
+
+       switch (fc_mode) {
+       case I40E_FC_FULL:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_RX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_TX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               break;
+       default:
+               break;
+       }
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status) {
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+               return status;
+       }
+
+       memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+       /* clear the old pause settings */
+       config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+                          ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+       /* set the new abilities */
+       config.abilities |= pause_mask;
+       /* If the abilities have changed, then set the new config */
+       if (config.abilities != abilities.abilities) {
+               /* Auto restart link so settings take effect */
+               if (atomic_restart)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+               /* Copy over all the old settings */
+               config.phy_type = abilities.phy_type;
+               config.link_speed = abilities.link_speed;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+               if (status)
+                       *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+       }
+       /* Update the link info */
+       status = i40e_update_link_info(hw, true);
+       if (status) {
+               /* Wait a little bit (on 40G cards it sometimes takes a really
+                * long time for link to come back from the atomic reset)
+                * and try once more
+                */
+               msleep(1000);
+               status = i40e_update_link_info(hw, true);
+       }
+       if (status)
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+       return status;
+}
+
 /**
  * i40e_aq_clear_pxe_mode
  * @hw: pointer to the hw struct
@@ -971,12 +1248,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 /**
  * i40e_aq_set_link_restart_an
  * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
  * @cmd_details: pointer to command details structure or NULL
  *
  * Sets up the link and restarts the Auto-Negotiation over the link.
  **/
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-                               struct i40e_asq_cmd_details *cmd_details)
+                                       bool enable_link,
+                                       struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_link_restart_an *cmd =
@@ -987,6 +1266,10 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
                                          i40e_aqc_opc_set_link_restart_an);
 
        cmd->command = I40E_AQ_PHY_RESTART_AN;
+       if (enable_link)
+               cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+       else
+               cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -1011,6 +1294,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
                (struct i40e_aqc_get_link_status *)&desc.params.raw;
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        i40e_status status;
+       bool tx_pause, rx_pause;
        u16 command_flags;
 
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1040,6 +1324,18 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
        hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
 
+       /* update fc info */
+       tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+       rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+       if (tx_pause && rx_pause)
+               hw->fc.current_mode = I40E_FC_FULL;
+       else if (tx_pause)
+               hw->fc.current_mode = I40E_FC_TX_PAUSE;
+       else if (rx_pause)
+               hw->fc.current_mode = I40E_FC_RX_PAUSE;
+       else
+               hw->fc.current_mode = I40E_FC_NONE;
+
        if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
                hw_link_info->crc_enable = true;
        else
@@ -1061,6 +1357,35 @@ aq_get_link_info_exit:
        return status;
 }
 
+/**
+ * i40e_update_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ *
+ * Returns the link status of the adapter
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       i40e_status status;
+
+       status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
+       if (status)
+               return status;
+
+       status = i40e_aq_get_phy_capabilities(hw, false, false,
+                                             &abilities, NULL);
+       if (status)
+               return status;
+
+       if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
+               hw->phy.link_info.an_enabled = true;
+       else
+               hw->phy.link_info.an_enabled = false;
+
+       return status;
+}
+
 /**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
@@ -1839,7 +2164,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        struct i40e_aqc_list_capabilities_element_resp *cap;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
-       u32 reg_val;
        u32 i = 0;
        u16 id;
 
@@ -1910,11 +2234,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        break;
                case I40E_DEV_FUNC_CAP_RSS:
                        p->rss = true;
-                       reg_val = rd32(hw, I40E_PFQF_CTL_0);
-                       if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
-                               p->rss_table_size = number;
-                       else
-                               p->rss_table_size = 128;
+                       p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
                        break;
                case I40E_DEV_FUNC_CAP_RX_QUEUES:
index cffdfc21290fdced44a5b1265eb2fb04a23608cf..ec07332e109eb02e21b1a3315219571fbf572c92 100644 (file)
@@ -1743,6 +1743,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
        } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+       } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
+               dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
+                        i40e_get_current_fd_count(pf));
        } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
                if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
                        int ret;
@@ -1830,7 +1833,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
                        ret = i40e_aq_get_lldp_mib(&pf->hw,
                                        I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
-                                       I40E_AQ_LLDP_MIB_LOCAL,
+                                       I40E_AQ_LLDP_MIB_REMOTE,
                                        buff, I40E_LLDPDU_SIZE,
                                        &llen, &rlen, NULL);
                        if (ret) {
@@ -1962,6 +1965,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  fd-atr off\n");
                dev_info(&pf->pdev->dev, "  fd-atr on\n");
+               dev_info(&pf->pdev->dev, "  fd current cnt\n");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
                dev_info(&pf->pdev->dev, "  lldp get local\n");
index 4a488ffcd6b0b1221e31755116acf7830f49d20c..3abd3cbab75fe8730c41d8f1cd1e76e72430cdc4 100644 (file)
@@ -215,52 +215,135 @@ static int i40e_get_settings(struct net_device *netdev,
        /* hardware is either in 40G mode or 10G mode
         * NOTE: this section initializes supported and advertising
         */
+       if (!link_up) {
+               /* link is down and the driver needs to fall back on
+                * device ID to determine what kinds of info to display,
+                * it's mostly a guess that may change when link is up
+                */
+               switch (hw->device_id) {
+               case I40E_DEV_ID_QSFP_A:
+               case I40E_DEV_ID_QSFP_B:
+               case I40E_DEV_ID_QSFP_C:
+                       /* pluggable QSFP */
+                       ecmd->supported = SUPPORTED_40000baseSR4_Full |
+                                         SUPPORTED_40000baseCR4_Full |
+                                         SUPPORTED_40000baseLR4_Full;
+                       ecmd->advertising = ADVERTISED_40000baseSR4_Full |
+                                           ADVERTISED_40000baseCR4_Full |
+                                           ADVERTISED_40000baseLR4_Full;
+                       break;
+               case I40E_DEV_ID_KX_B:
+                       /* backplane 40G */
+                       ecmd->supported = SUPPORTED_40000baseKR4_Full;
+                       ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+                       break;
+               case I40E_DEV_ID_KX_C:
+                       /* backplane 10G */
+                       ecmd->supported = SUPPORTED_10000baseKR_Full;
+                       ecmd->advertising = ADVERTISED_10000baseKR_Full;
+                       break;
+               default:
+                       /* all the rest are 10G/1G */
+                       ecmd->supported = SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_1000baseT_Full;
+                       ecmd->advertising = ADVERTISED_10000baseT_Full |
+                                           ADVERTISED_1000baseT_Full;
+                       break;
+               }
+
+               /* skip phy_type use as it is zero when link is down */
+               goto no_valid_phy_type;
+       }
+
        switch (hw_link_info->phy_type) {
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_40GBASE_CR4_CU:
-               ecmd->supported = SUPPORTED_40000baseCR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_KR4:
-               ecmd->supported = SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseKR4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseKR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_SR4:
+       case I40E_PHY_TYPE_XLPPI:
+       case I40E_PHY_TYPE_XLAUI:
                ecmd->supported = SUPPORTED_40000baseSR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ecmd->supported = SUPPORTED_40000baseLR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseLR4_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_KX4:
-               ecmd->supported = SUPPORTED_10000baseKX4_Full;
-               ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseKX4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseKX4_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_KR:
-               ecmd->supported = SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_10000baseKR_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseKR_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseKR_Full;
                break;
-       default:
-               if (i40e_is_40G_device(hw->device_id)) {
-                       ecmd->supported = SUPPORTED_40000baseSR4_Full;
-                       ecmd->advertising = ADVERTISED_40000baseSR4_Full;
-               } else {
-                       ecmd->supported = SUPPORTED_10000baseT_Full;
-                       ecmd->advertising = ADVERTISED_10000baseT_Full;
-               }
+       case I40E_PHY_TYPE_10GBASE_SR:
+       case I40E_PHY_TYPE_10GBASE_LR:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_CR1_CU:
+       case I40E_PHY_TYPE_10GBASE_CR1:
+       case I40E_PHY_TYPE_10GBASE_T:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_XAUI:
+       case I40E_PHY_TYPE_XFI:
+       case I40E_PHY_TYPE_SFI:
+       case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
                break;
+       case I40E_PHY_TYPE_1000BASE_KX:
+       case I40E_PHY_TYPE_1000BASE_T:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_100baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_100baseT_Full;
+               break;
+       case I40E_PHY_TYPE_SGMII:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full |
+                                 SUPPORTED_100baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full |
+                                   ADVERTISED_100baseT_Full;
+               break;
+       default:
+               /* if we got here and link is up something bad is afoot */
+               WARN_ON(link_up);
        }
 
-       ecmd->supported |= SUPPORTED_Autoneg;
-       ecmd->advertising |= ADVERTISED_Autoneg;
+no_valid_phy_type:
+       /* this is if autoneg is enabled or disabled */
        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
 
        switch (hw->phy.media_type) {
        case I40E_MEDIA_TYPE_BACKPLANE:
-               ecmd->supported |= SUPPORTED_Backplane;
-               ecmd->advertising |= ADVERTISED_Backplane;
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_Backplane;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_Backplane;
                ecmd->port = PORT_NONE;
                break;
        case I40E_MEDIA_TYPE_BASET:
@@ -276,7 +359,6 @@ static int i40e_get_settings(struct net_device *netdev,
                break;
        case I40E_MEDIA_TYPE_FIBER:
                ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_FIBRE;
                break;
        case I40E_MEDIA_TYPE_UNKNOWN:
@@ -287,6 +369,25 @@ static int i40e_get_settings(struct net_device *netdev,
 
        ecmd->transceiver = XCVR_EXTERNAL;
 
+       ecmd->supported |= SUPPORTED_Pause;
+
+       switch (hw->fc.current_mode) {
+       case I40E_FC_FULL:
+               ecmd->advertising |= ADVERTISED_Pause;
+               break;
+       case I40E_FC_TX_PAUSE:
+               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               break;
+       case I40E_FC_RX_PAUSE:
+               ecmd->advertising |= (ADVERTISED_Pause |
+                                     ADVERTISED_Asym_Pause);
+               break;
+       default:
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+               break;
+       }
+
        if (link_up) {
                switch (link_speed) {
                case I40E_LINK_SPEED_40GB:
@@ -296,6 +397,9 @@ static int i40e_get_settings(struct net_device *netdev,
                case I40E_LINK_SPEED_10GB:
                        ethtool_cmd_speed_set(ecmd, SPEED_10000);
                        break;
+               case I40E_LINK_SPEED_1GB:
+                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
+                       break;
                default:
                        break;
                }
@@ -308,6 +412,182 @@ static int i40e_get_settings(struct net_device *netdev,
        return 0;
 }
 
+/**
+ * i40e_set_settings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Set speed/duplex per media_types advertised/forced
+ **/
+static int i40e_set_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct ethtool_cmd safe_ecmd;
+       i40e_status status = 0;
+       bool change = false;
+       int err = 0;
+       u8 autoneg;
+       u32 advertise;
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE)
+               return -EOPNOTSUPP;
+
+       /* get our own copy of the bits to check against */
+       memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
+       i40e_get_settings(netdev, &safe_ecmd);
+
+       /* save autoneg and speed out of ecmd */
+       autoneg = ecmd->autoneg;
+       advertise = ecmd->advertising;
+
+       /* set autoneg and speed back to what they currently are */
+       ecmd->autoneg = safe_ecmd.autoneg;
+       ecmd->advertising = safe_ecmd.advertising;
+
+       ecmd->cmd = safe_ecmd.cmd;
+       /* If ecmd and safe_ecmd are not the same now, then they are
+        * trying to set something that we do not support
+        */
+       if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+               return -EOPNOTSUPP;
+
+       while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
+               usleep_range(1000, 2000);
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status)
+               return -EAGAIN;
+
+       /* Copy link_speed and abilities to config in case they are not
+        * set below
+        */
+       memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+       config.link_speed = abilities.link_speed;
+       config.abilities = abilities.abilities;
+
+       /* Check autoneg */
+       if (autoneg == AUTONEG_ENABLE) {
+               /* If autoneg is not supported, return error */
+               if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                       netdev_info(netdev, "Autoneg not supported on this phy\n");
+                       return -EINVAL;
+               }
+               /* If autoneg was not already enabled */
+               if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+                       config.abilities = abilities.abilities |
+                                          I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       } else {
+               /* If autoneg is supported 10GBASE_T is the only phy that
+                * can disable it, so otherwise return error
+                */
+               if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                   hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
+                       netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+                       return -EINVAL;
+               }
+               /* If autoneg is currently enabled */
+               if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+                       config.abilities = abilities.abilities &
+                                          ~I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       }
+
+       if (advertise & ~safe_ecmd.supported)
+               return -EINVAL;
+
+       if (advertise & ADVERTISED_100baseT_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) {
+                       config.link_speed |= I40E_LINK_SPEED_100MB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_1000baseT_Full ||
+           advertise & ADVERTISED_1000baseKX_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_1GB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_10000baseT_Full ||
+           advertise & ADVERTISED_10000baseKX4_Full ||
+           advertise & ADVERTISED_10000baseKR_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_10GB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_40000baseKR4_Full ||
+           advertise & ADVERTISED_40000baseCR4_Full ||
+           advertise & ADVERTISED_40000baseSR4_Full ||
+           advertise & ADVERTISED_40000baseLR4_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_40GB;
+                       change = true;
+               }
+
+       if (change) {
+               /* copy over the rest of the abilities */
+               config.phy_type = abilities.phy_type;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+
+               /* If link is up set link and an so changes take effect */
+               if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+
+               /* make the aq call */
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+               if (status) {
+                       netdev_info(netdev, "Set phy config failed with error %d.\n",
+                                   status);
+                       return -EAGAIN;
+               }
+
+               status = i40e_update_link_info(hw, true);
+               if (status)
+                       netdev_info(netdev, "Updating link info failed with error %d\n",
+                                   status);
+
+       } else {
+               netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+       }
+
+       return err;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+       /* restart autonegotiation */
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+       i40e_status ret = 0;
+
+       ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+       if (ret) {
+               netdev_info(netdev, "link restart failed, aq_err=%d\n",
+                           pf->hw.aq.asq_last_status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /**
  * i40e_get_pauseparam -  Get Flow Control status
  * Return tx/rx-pause status
@@ -334,6 +614,81 @@ static void i40e_get_pauseparam(struct net_device *netdev,
        }
 }
 
+/**
+ * i40e_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: return tx/rx flow control status
+ **/
+static int i40e_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+       i40e_status status;
+       u8 aq_failures;
+       int err;
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+           AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+               netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we have link and don't have autoneg */
+       if (!test_bit(__I40E_DOWN, &pf->state) &&
+           !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+               /* Send message that it might not necessarily work*/
+               netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
+       }
+
+       if (hw->fc.current_mode == I40E_FC_PFC) {
+               netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_FULL;
+       else if (pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_RX_PAUSE;
+       else if (!pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_TX_PAUSE;
+       else if (!pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_NONE;
+       else
+               return -EINVAL;
+
+       /* Set the fc mode and only restart an if link is up*/
+       status = i40e_set_fc(hw, &aq_failures, link_up);
+
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+               netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+
+       if (!test_bit(__I40E_DOWN, &pf->state))
+               return i40e_nway_reset(netdev);
+
+       return err;
+}
+
 static u32 i40e_get_msglevel(struct net_device *netdev)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -1021,24 +1376,6 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        return 0;
 }
 
-static int i40e_nway_reset(struct net_device *netdev)
-{
-       /* restart autonegotiation */
-       struct i40e_netdev_priv *np = netdev_priv(netdev);
-       struct i40e_pf *pf = np->vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       i40e_status ret = 0;
-
-       ret = i40e_aq_set_link_restart_an(hw, NULL);
-       if (ret) {
-               netdev_info(netdev, "link restart failed, aq_err=%d\n",
-                           pf->hw.aq.asq_last_status);
-               return -EIO;
-       }
-
-       return 0;
-}
-
 static int i40e_set_phys_id(struct net_device *netdev,
                            enum ethtool_phys_id_state state)
 {
@@ -1105,17 +1442,36 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
+       vector = vsi->base_vector;
        if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
-           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-       else
+       } else if (ec->rx_coalesce_usecs == 0) {
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+               i40e_irq_dynamic_disable(vsi, vector);
+               if (ec->use_adaptive_rx_coalesce)
+                       netif_info(pf, drv, netdev,
+                                  "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev,
+                          "Invalid value, Rx-usecs range is 0, 8-8160\n");
                return -EINVAL;
+       }
 
        if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
-           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-       else
+       } else if (ec->tx_coalesce_usecs == 0) {
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+               i40e_irq_dynamic_disable(vsi, vector);
+               if (ec->use_adaptive_tx_coalesce)
+                       netif_info(pf, drv, netdev,
+                                  "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev,
+                          "Invalid value, Tx-usecs range is 0, 8-8160\n");
                return -EINVAL;
+       }
 
        if (ec->use_adaptive_rx_coalesce)
                vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
@@ -1127,7 +1483,6 @@ static int i40e_set_coalesce(struct net_device *netdev,
        else
                vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
-       vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
                q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
@@ -1731,6 +2086,7 @@ static int i40e_set_channels(struct net_device *dev,
 
 static const struct ethtool_ops i40e_ethtool_ops = {
        .get_settings           = i40e_get_settings,
+       .set_settings           = i40e_set_settings,
        .get_drvinfo            = i40e_get_drvinfo,
        .get_regs_len           = i40e_get_regs_len,
        .get_regs               = i40e_get_regs,
@@ -1743,6 +2099,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .get_ringparam          = i40e_get_ringparam,
        .set_ringparam          = i40e_set_ringparam,
        .get_pauseparam         = i40e_get_pauseparam,
+       .set_pauseparam         = i40e_set_pauseparam,
        .get_msglevel           = i40e_get_msglevel,
        .set_msglevel           = i40e_set_msglevel,
        .get_rxnfc              = i40e_get_rxnfc,
index b45d8fedc5e755e712e753c4f6b47a977545c45a..732a02660330664ad7e65a2a78195a28bd4339fd 100644 (file)
@@ -127,7 +127,7 @@ struct i40e_hmc_info {
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
                (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +146,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
index 870ab1ee072cdf4759e848a8eca6cdbf6dc0a793..4627588f461346f292e8e03aca4967f622dbdc6c 100644 (file)
@@ -417,7 +417,6 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                        default:
                                ret_code = I40E_ERR_INVALID_SD_TYPE;
                                goto exit;
-                               break;
                        }
                }
        }
@@ -502,7 +501,6 @@ try_type_paged:
                hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
                          ret_code);
                goto configure_lan_hmc_out;
-               break;
        }
 
        /* Configure and program the FPM registers so objects can be created */
@@ -746,6 +744,194 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
        { 0 }
 };
 
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u8 src_byte, dest_byte, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = ((u8)1 << ce_info->width) - 1;
+
+       src_byte = *from;
+       src_byte &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_byte <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_byte, dest, sizeof(dest_byte));
+
+       dest_byte &= ~mask;     /* get the bits not changing */
+       dest_byte |= src_byte;  /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_byte, sizeof(dest_byte));
+}
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u16 src_word, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le16 dest_word;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = ((u16)1 << ce_info->width) - 1;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_word = *(u16 *)from;
+       src_word &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_word <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_word, dest, sizeof(dest_word));
+
+       dest_word &= ~(cpu_to_le16(mask));      /* get the bits not changing */
+       dest_word |= cpu_to_le16(src_word);     /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_word, sizeof(dest_word));
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u32 src_dword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le32 dest_dword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 32 on an x86 machine, then the shift
+        * operation will not work because the SHL instructions count is masked
+        * to 5 bits so the shift will do nothing
+        */
+       if (ce_info->width < 32)
+               mask = ((u32)1 << ce_info->width) - 1;
+       else
+               mask = 0xFFFFFFFF;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_dword = *(u32 *)from;
+       src_dword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_dword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_dword, dest, sizeof(dest_dword));
+
+       dest_dword &= ~(cpu_to_le32(mask));     /* get the bits not changing */
+       dest_dword |= cpu_to_le32(src_dword);   /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_dword, sizeof(dest_dword));
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u64 src_qword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le64 dest_qword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 64 on an x86 machine, then the shift
+        * operation will not work because the SHL instructions count is masked
+        * to 6 bits so the shift will do nothing
+        */
+       if (ce_info->width < 64)
+               mask = ((u64)1 << ce_info->width) - 1;
+       else
+               mask = 0xFFFFFFFFFFFFFFFF;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_qword = *(u64 *)from;
+       src_qword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_qword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_qword, dest, sizeof(dest_qword));
+
+       dest_qword &= ~(cpu_to_le64(mask));     /* get the bits not changing */
+       dest_qword |= cpu_to_le64(src_qword);   /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_qword, sizeof(dest_qword));
+}
+
 /**
  * i40e_clear_hmc_context - zero out the HMC context bits
  * @hw:       the hardware struct
@@ -772,71 +958,28 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
                                        struct i40e_context_ele *ce_info,
                                        u8 *dest)
 {
-       u16 shift_width;
-       u64 bitfield;
-       u8 hi_byte;
-       u8 hi_mask;
-       u64 t_bits;
-       u64 mask;
-       u8 *p;
        int f;
 
        for (f = 0; ce_info[f].width != 0; f++) {
-               /* clear out the field */
-               bitfield = 0;
 
-               /* copy from the next struct field */
-               p = dest + ce_info[f].offset;
+               /* we have to deal with each element of the HMC using the
+                * correct size so that we are correct regardless of the
+                * endianness of the machine
+                */
                switch (ce_info[f].size_of) {
                case 1:
-                       bitfield = *p;
+                       i40e_write_byte(context_bytes, &ce_info[f], dest);
                        break;
                case 2:
-                       bitfield = cpu_to_le16(*(u16 *)p);
+                       i40e_write_word(context_bytes, &ce_info[f], dest);
                        break;
                case 4:
-                       bitfield = cpu_to_le32(*(u32 *)p);
+                       i40e_write_dword(context_bytes, &ce_info[f], dest);
                        break;
                case 8:
-                       bitfield = cpu_to_le64(*(u64 *)p);
+                       i40e_write_qword(context_bytes, &ce_info[f], dest);
                        break;
                }
-
-               /* prepare the bits and mask */
-               shift_width = ce_info[f].lsb % 8;
-               mask = ((u64)1 << ce_info[f].width) - 1;
-
-               /* save upper bytes for special case */
-               hi_mask = (u8)((mask >> 56) & 0xff);
-               hi_byte = (u8)((bitfield >> 56) & 0xff);
-
-               /* shift to correct alignment */
-               mask <<= shift_width;
-               bitfield <<= shift_width;
-
-               /* get the current bits from the target bit string */
-               p = context_bytes + (ce_info[f].lsb / 8);
-               memcpy(&t_bits, p, sizeof(u64));
-
-               t_bits &= ~mask;          /* get the bits not changing */
-               t_bits |= bitfield;       /* add in the new bits */
-
-               /* put it all back */
-               memcpy(p, &t_bits, sizeof(u64));
-
-               /* deal with the special case if needed
-                * example: 62 bit field that starts in bit 5 of first byte
-                *          will overlap 3 bits into byte 9
-                */
-               if ((shift_width + ce_info[f].width) > 64) {
-                       u8 byte;
-
-                       hi_mask >>= (8 - shift_width);
-                       hi_byte >>= (8 - shift_width);
-                       byte = p[8] & ~hi_mask;  /* get the bits not changing */
-                       byte |= hi_byte;         /* add in the new bits */
-                       p[8] = byte;             /* put it back */
-               }
        }
 
        return 0;
index eb65fe23c4a70077e31e2546208aaeaecf93224d..e74128db5be5480aef4eaa4761890c0325c82827 100644 (file)
@@ -32,16 +32,22 @@ struct i40e_hw;
 
 /* HMC element context information */
 
-/* Rx queue context data */
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
 struct i40e_hmc_obj_rxq {
        u16 head;
-       u8  cpuid;
+       u16 cpuid; /* bigger than needed, see above for reason */
        u64 base;
        u16 qlen;
 #define I40E_RXQ_CTX_DBUFF_SHIFT 7
-       u8  dbuff;
+       u16 dbuff; /* bigger than needed, see above for reason */
 #define I40E_RXQ_CTX_HBUFF_SHIFT 6
-       u8  hbuff;
+       u16 hbuff; /* bigger than needed, see above for reason */
        u8  dtype;
        u8  dsize;
        u8  crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
        u8  hsplit_0;
        u8  hsplit_1;
        u8  showiv;
-       u16 rxmax;
+       u32 rxmax; /* bigger than needed, see above for reason */
        u8  tphrdesc_ena;
        u8  tphwdesc_ena;
        u8  tphdata_ena;
        u8  tphhead_ena;
-       u8  lrxqthresh;
+       u16 lrxqthresh; /* bigger than needed, see above for reason */
        u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
-/* Tx queue context data */
+/* Tx queue context data
+*
+* The sizes of the variables may be larger than needed due to crossing byte
+* boundaries. If we do not have the width of the variable set to the correct
+* size then we could end up shifting bits off the top of the variable when the
+* variable is at the top of a byte and crosses over into the next byte.
+*/
 struct i40e_hmc_obj_txq {
        u16 head;
        u8  new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
        u8  fd_ena;
        u8  alt_vlan_ena;
        u16 thead_wb;
-       u16 cpuid;
+       u8  cpuid;
        u8  head_wb_ena;
        u16 qlen;
        u8  tphrdesc_ena;
index 275ca9a1719ed812e7ee15ac8bfda447ee31e217..c34e39009a8f5c7ae0d43e735490cfa61f7b1ef7 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 21
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -278,7 +278,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
        pf->tx_timeout_count++;
 
        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
-               pf->tx_timeout_recovery_level = 0;
+               pf->tx_timeout_recovery_level = 1;
        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d\n",
                    pf->tx_timeout_recovery_level);
@@ -304,8 +304,8 @@ static void i40e_tx_timeout(struct net_device *netdev)
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-               set_bit(__I40E_DOWN, &vsi->state);
-               i40e_down(vsi);
+               set_bit(__I40E_DOWN_REQUESTED, &pf->state);
+               set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
        i40e_service_event_schedule(pf);
@@ -444,9 +444,21 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
  **/
 void i40e_pf_reset_stats(struct i40e_pf *pf)
 {
+       int i;
+
        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (pf->veb[i]) {
+                       memset(&pf->veb[i]->stats, 0,
+                              sizeof(pf->veb[i]->stats));
+                       memset(&pf->veb[i]->stats_offsets, 0,
+                              sizeof(pf->veb[i]->stats_offsets));
+                       pf->veb[i]->stat_offsets_loaded = false;
+               }
+       }
 }
 
 /**
@@ -1315,9 +1327,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 
        netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
 
-       if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
-               return 0;
-
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return -EADDRNOTAVAIL;
@@ -1325,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
                ret = i40e_aq_mac_address_write(&vsi->back->hw,
-                                               I40E_AQC_WRITE_TYPE_LAA_ONLY,
+                                               I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret) {
                        netdev_info(netdev,
@@ -1333,22 +1342,27 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                                    ret);
                        return -EADDRNOTAVAIL;
                }
-
-               ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
        }
 
-       /* In order to be sure to not drop any packets, add the new address
-        * then delete the old one.
-        */
-       f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
-       if (!f)
-               return -ENOMEM;
+       f = i40e_find_mac(vsi, addr->sa_data, false, true);
+       if (!f) {
+               /* In order to be sure to not drop any packets, add the
+                * new address first then delete the old one.
+                */
+               f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
+                                   false, false);
+               if (!f)
+                       return -ENOMEM;
 
-       i40e_sync_vsi_filters(vsi);
-       i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
-       i40e_sync_vsi_filters(vsi);
+               i40e_sync_vsi_filters(vsi);
+               i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+                               false, false);
+               i40e_sync_vsi_filters(vsi);
+       }
 
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       f->is_laa = true;
+       if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+               ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
        return 0;
 }
@@ -2387,10 +2401,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
        rx_ctx.rxmax = min_t(u16, vsi->max_frame,
                                  (chain_len * ring->rx_buf_len));
-       rx_ctx.tphrdesc_ena = 1;
-       rx_ctx.tphwdesc_ena = 1;
-       rx_ctx.tphdata_ena = 1;
-       rx_ctx.tphhead_ena = 1;
        if (hw->revision_id == 0)
                rx_ctx.lrxqthresh = 0;
        else
@@ -2755,6 +2765,22 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
        /* skip the flush */
 }
 
+/**
+ * i40e_irq_dynamic_disable - Disable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       i40e_flush(hw);
+}
+
 /**
  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
  * @irq: interrupt number
@@ -3057,16 +3083,33 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
 
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
+               /* move past filter desc */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buf, dma),
                                 dma_unmap_len(tx_buf, len),
                                 DMA_TO_DEVICE);
+               if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buf->raw_buf);
 
+               tx_buf->raw_buf = NULL;
+               tx_buf->tx_flags = 0;
+               tx_buf->next_to_watch = NULL;
                dma_unmap_len_set(tx_buf, len, 0);
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
 
-
-               /* move to the next desc and buffer to clean */
+               /* move us past the eop_desc for start of next FD desc */
                tx_buf++;
                tx_desc++;
                i++;
@@ -3151,8 +3194,12 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 
        /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
         * group them so there are multiple queues per vector.
+        * It is also important to go through all the vectors available to be
+        * sure that if we don't use all the vectors, that the remaining vectors
+        * are cleared. This is especially important when decreasing the
+        * number of queues in use.
         */
-       for (; v_start < q_vectors && qp_remaining; v_start++) {
+       for (; v_start < q_vectors; v_start++) {
                struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
 
                num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
@@ -3227,6 +3274,35 @@ static void i40e_netpoll(struct net_device *netdev)
 }
 #endif
 
+/**
+ * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Tx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ **/
+static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 tx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
 /**
  * i40e_vsi_control_tx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -3236,7 +3312,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 tx_reg;
 
        pf_q = vsi->base_queue;
@@ -3269,22 +3345,46 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-                       if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_txq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Tx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
        if (hw->revision_id == 0)
                mdelay(50);
+       return ret;
+}
+
+/**
+ * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ **/
+static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 rx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
 
        return 0;
 }
@@ -3298,7 +3398,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 rx_reg;
 
        pf_q = vsi->base_queue;
@@ -3323,22 +3423,17 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-
-                       if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Rx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -4231,8 +4326,12 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 static int i40e_up_complete(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
+       u8 set_fc_aq_fail = 0;
        int err;
 
+       /* force flow control off */
+       i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
@@ -4638,6 +4737,23 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                        }
                }
 
+               /* no further action needed, so return now */
+               return;
+       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+               int v;
+
+               /* Find the VSI(s) that needs to be brought down */
+               dev_info(&pf->pdev->dev, "VSI down requested\n");
+               for (v = 0; v < pf->num_alloc_vsi; v++) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+                       if (vsi != NULL &&
+                           test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
+                               set_bit(__I40E_DOWN, &vsi->state);
+                               i40e_down(vsi);
+                               clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
+                       }
+               }
+
                /* no further action needed, so return now */
                return;
        } else {
@@ -4845,7 +4961,20 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
+ **/
+int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+       int val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get the count of total FD filters programmed
  * @pf: board private structure
  **/
 int i40e_get_current_fd_count(struct i40e_pf *pf)
@@ -4857,7 +4986,6 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
                      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
        return fcnt_prog;
 }
-
 /**
  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
  * @pf: board private structure
@@ -4872,8 +5000,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
        if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
            (pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
-       fcnt_prog = i40e_get_current_fd_count(pf);
-       fcnt_avail = i40e_get_fd_cnt_all(pf);
+       fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+       fcnt_avail = pf->fdir_pf_filter_count;
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -5110,6 +5238,10 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
+       if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
+               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+       }
 
        /* If there's a recovery already waiting, it takes
         * precedence before starting a new reset sequence.
@@ -5164,7 +5296,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
         * then see if the status changed while processing the
         * initial event.
         */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 }
 
@@ -5182,9 +5314,6 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        u32 oldval;
        u32 val;
 
-       if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
-               return;
-
        /* check for error indications */
        val = rd32(&pf->hw, pf->hw.aq.arq.len);
        oldval = val;
@@ -5228,10 +5357,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        do {
                event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
                ret = i40e_clean_arq_element(hw, &event, &pending);
-               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                       dev_info(&pf->pdev->dev, "No ARQ event found\n");
+               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        break;
-               else if (ret) {
+               else if (ret) {
                        dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
                        break;
                }
@@ -5463,6 +5591,20 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
        struct i40e_vsi *vsi;
        int i;
 
+       /* quick workaround for an NVM issue that leaves a critical register
+        * uninitialized
+        */
+       if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
+               static const u32 hkey[] = {
+                       0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
+                       0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
+                       0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
+                       0x95b3a76d};
+
+               for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
+                       wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
+       }
+
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
 
@@ -5512,7 +5654,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
  *
  * Close up the VFs and other things in prep for pf Reset.
   **/
-static int i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
@@ -5520,7 +5662,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 
        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
-               return 0;
+               return;
 
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -5537,13 +5679,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
        /* call shutdown HMC */
        if (hw->hmc.hmc_obj) {
                ret = i40e_shutdown_lan_hmc(hw);
-               if (ret) {
+               if (ret)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
-                       clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
-               }
        }
-       return ret;
 }
 
 /**
@@ -5679,7 +5818,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        }
 
        if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
-               dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
                /* no VEB, so rebuild only the Main VSI */
                ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
                if (ret) {
@@ -5717,11 +5856,8 @@ end_core_reset:
  **/
 static void i40e_handle_reset_warning(struct i40e_pf *pf)
 {
-       i40e_status ret;
-
-       ret = i40e_prep_for_reset(pf);
-       if (!ret)
-               i40e_reset_and_rebuild(pf, false);
+       i40e_prep_for_reset(pf);
+       i40e_reset_and_rebuild(pf, false);
 }
 
 /**
@@ -5734,6 +5870,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        bool mdd_detected = false;
+       bool pf_mdd_detected = false;
        struct i40e_vf *vf;
        u32 reg;
        int i;
@@ -5744,26 +5881,28 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        /* find what triggered the MDD event */
        reg = rd32(hw, I40E_GL_MDET_TX);
        if (reg & I40E_GL_MDET_TX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_TX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
-                               >> I40E_GL_MDET_TX_QUEUE_SHIFT;
+               u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_PF_NUM_SHIFT;
+               u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_VF_NUM_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+                               I40E_GL_MDET_TX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+                               I40E_GL_MDET_TX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
-                        event, queue, func);
+                        "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+                        event, queue, pf_num, vf_num);
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
        }
        reg = rd32(hw, I40E_GL_MDET_RX);
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_RX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
-                               >> I40E_GL_MDET_RX_QUEUE_SHIFT;
+               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+                               I40E_GL_MDET_RX_FUNCTION_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+                               I40E_GL_MDET_RX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+                               I40E_GL_MDET_RX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
                         "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
                         event, queue, func);
@@ -5771,6 +5910,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                mdd_detected = true;
        }
 
+       if (mdd_detected) {
+               reg = rd32(hw, I40E_PF_MDET_TX);
+               if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+                       dev_info(&pf->pdev->dev,
+                                "MDD TX event is for this function 0x%08x, requesting PF reset.\n",
+                                reg);
+                       pf_mdd_detected = true;
+               }
+               reg = rd32(hw, I40E_PF_MDET_RX);
+               if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+                       dev_info(&pf->pdev->dev,
+                                "MDD RX event is for this function 0x%08x, requesting PF reset.\n",
+                                reg);
+                       pf_mdd_detected = true;
+               }
+               /* Queue belongs to the PF, initiate a reset */
+               if (pf_mdd_detected) {
+                       set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       i40e_service_event_schedule(pf);
+               }
+       }
+
        /* see if one of the VFs needs its hand slapped */
        for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
                vf = &(pf->vf[i]);
@@ -5860,6 +6023,12 @@ static void i40e_service_task(struct work_struct *work)
                                          service_task);
        unsigned long start_time = jiffies;
 
+       /* don't bother with service tasks if a reset is in progress */
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+               i40e_service_event_complete(pf);
+               return;
+       }
+
        i40e_reset_subtask(pf);
        i40e_handle_mdd_event(pf);
        i40e_vc_process_vflr_event(pf);
@@ -6492,6 +6661,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
        u32 lut = 0;
        int i, j;
        u64 hena;
+       u32 reg_val;
 
        /* Fill out hash function seed */
        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -6504,8 +6674,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
+       /* Check capability and Set table size and register per hw expectation*/
+       reg_val = rd32(hw, I40E_PFQF_CTL_0);
+       if (hw->func_caps.rss_table_size == 512) {
+               reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+               pf->rss_table_size = 512;
+       } else {
+               pf->rss_table_size = 128;
+               reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+       }
+       wr32(hw, I40E_PFQF_CTL_0, reg_val);
+
        /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+       for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
 
                /* The assumption is that lan qp count will be the highest
                 * qp count for any PF VSI that needs RSS.
@@ -6592,13 +6773,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size = 1;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
-       } else {
-               pf->rss_size = 1;
        }
 
        /* MFP mode enabled */
@@ -6670,6 +6850,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
        pf->irq_pile->search_hint = 0;
 
+       pf->tx_timeout_recovery_level = 1;
+
        mutex_init(&pf->switch_mutex);
 
 sw_init_done:
@@ -6702,9 +6884,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                        i40e_fdir_filter_exit(pf);
                }
                pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-               /* if ATR was disabled it can be re-enabled. */
-               if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
-                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               /* if ATR was auto disabled it can be re-enabled. */
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
        }
        return need_reset;
 }
@@ -6833,6 +7017,22 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+static int i40e_get_phys_port_id(struct net_device *netdev,
+                                struct netdev_phys_port_id *ppid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+
+       if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
+       memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+
+       return 0;
+}
+
 #ifdef HAVE_FDB_OPS
 #ifdef USE_CONST_DEV_UC_CHAR
 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -6910,13 +7110,14 @@ static int i40e_ndo_fdb_del(struct ndmsg *ndm,
 static int i40e_ndo_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
                             struct net_device *dev,
+                            struct net_device *filter_dev,
                             int idx)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_pf *pf = np->vsi->back;
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
-               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 
        return idx;
 }
@@ -6951,6 +7152,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
 #endif
+       .ndo_get_phys_port_id   = i40e_get_phys_port_id,
 #ifdef HAVE_FDB_OPS
        .ndo_fdb_add            = i40e_ndo_fdb_add,
 #ifndef USE_DEFAULT_FDB_DEL_DUMP
@@ -7223,6 +7425,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                f->changed = true;
                f_count++;
+
+               if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
+                       i40e_aq_mac_address_write(&vsi->back->hw,
+                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                                 f->macaddr, NULL);
+               }
        }
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -8090,7 +8298,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
-       u32 rxfc = 0, txfc = 0, rxfc_reg;
        int ret;
 
        /* find out what's out there already */
@@ -8150,68 +8357,13 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);
-       /* requested_mode is set in probe or by ethtool */
-       if (!pf->fc_autoneg_status)
-               goto no_autoneg;
-
-       if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
-           (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
-               pf->hw.fc.current_mode = I40E_FC_FULL;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
-               pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
-               pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
-       else
-               pf->hw.fc.current_mode = I40E_FC_NONE;
 
-       /* sync the flow control settings with the auto-neg values */
-       switch (pf->hw.fc.current_mode) {
-       case I40E_FC_FULL:
-               txfc = 1;
-               rxfc = 1;
-               break;
-       case I40E_FC_TX_PAUSE:
-               txfc = 1;
-               rxfc = 0;
-               break;
-       case I40E_FC_RX_PAUSE:
-               txfc = 0;
-               rxfc = 1;
-               break;
-       case I40E_FC_NONE:
-       case I40E_FC_DEFAULT:
-               txfc = 0;
-               rxfc = 0;
-               break;
-       case I40E_FC_PFC:
-               /* TBD */
-               break;
-       /* no default case, we have to handle all possibilities here */
-       }
-
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
-
-       rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                  ~I40E_PRTDCB_MFLCN_RFCE_MASK;
-       rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
-
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
-
-       goto fc_complete;
-
-no_autoneg:
-       /* disable L2 flow control, user can turn it on if they wish */
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                                        ~I40E_PRTDCB_MFLCN_RFCE_MASK);
-
-fc_complete:
        i40e_ptp_init(pf);
 
        return ret;
@@ -8460,6 +8612,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Reset here to make sure all is clean and to define PF 'n' */
+       i40e_clear_hw(hw);
        err = i40e_pf_reset(hw);
        if (err) {
                dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
@@ -8495,6 +8648,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_pf_reset;
        }
 
+       if (hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+               dev_info(&pdev->dev,
+                        "Note: FW API version %02x.%02x newer than expected %02x.%02x, recommend driver update.\n",
+                        hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                        I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+
+       if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+           hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR-1))
+               dev_info(&pdev->dev,
+                        "Note: FW API version %02x.%02x older than expected %02x.%02x, recommend nvm update.\n",
+                        hw->aq.api_maj_ver, hw->aq.api_min_ver,
+                        I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+
+
        i40e_verify_eeprom(pf);
 
        /* Rev 0 hardware was never productized */
@@ -8535,6 +8702,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
        ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
+       i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+       if (is_valid_ether_addr(hw->mac.port_addr))
+               pf->flags |= I40E_FLAG_PORT_ID_VALID;
 
        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);
@@ -8722,7 +8892,6 @@ static void i40e_remove(struct pci_dev *pdev)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        i40e_status ret_code;
-       u32 reg;
        int i;
 
        i40e_dbg_pf_exit(pf);
@@ -8800,11 +8969,6 @@ static void i40e_remove(struct pci_dev *pdev)
        kfree(pf->irq_pile);
        kfree(pf->vsi);
 
-       /* force a PF reset to clean anything leftover */
-       reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
-       wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
-       i40e_flush(&pf->hw);
-
        iounmap(pf->hw.hw_addr);
        kfree(pf);
        pci_release_selected_regions(pdev,
index 81299189a47d3e58b61e75dba58869a85e813f2e..66bcb15422daddc4de40d8d37bc4148e916ddfa8 100644 (file)
@@ -324,13 +324,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
        u16 checksum_sr = 0;
        u16 checksum_local = 0;
 
-       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-       if (ret_code)
-               goto i40e_validate_nvm_checksum_exit;
-
        ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
        if (ret_code)
-               goto i40e_validate_nvm_checksum_free;
+               goto i40e_validate_nvm_checksum_exit;
 
        /* Do not use i40e_read_nvm_word() because we do not want to take
         * the synchronization semaphores twice here.
@@ -347,9 +343,6 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
        if (checksum)
                *checksum = checksum_local;
 
-i40e_validate_nvm_checksum_free:
-       i40e_release_nvm(hw);
-
 i40e_validate_nvm_checksum_exit:
        return ret_code;
 }
index a430699c41d5e2be548db369bca02efdcaad40bf..9383f08ff4e3e3fc2f9e6cb155b1dacf3dc46335 100644 (file)
@@ -74,13 +74,24 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
                                struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_reset);
 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-                               struct i40e_asq_cmd_details *cmd_details);
+                                       bool enable_link,
+                                       struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
                                bool enable_lse, struct i40e_link_status *link,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
 i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
                                u64 advt_reg,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -216,10 +227,11 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
 /* i40e_common */
 i40e_status i40e_init_shared_code(struct i40e_hw *hw);
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
-                                               u8 *mac_addr);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
 /* prototype for functions used for NVM access */
index 101f439acda6adfd7e57084865f3bd1b16d06362..bb7fe98b3a6cd7bd3c99d7454186f19451e84f1d 100644 (file)
@@ -25,7 +25,6 @@
  ******************************************************************************/
 
 #include "i40e.h"
-#include <linux/export.h>
 #include <linux/ptp_classify.h>
 
 /* The XL710 timesync is very much like Intel's 82599 design when it comes to
@@ -216,7 +215,7 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
 }
 
 /**
- * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
  * @ptp: The PTP clock structure
  * @rq: The requested feature to change
  * @on: Enable/disable flag
@@ -224,8 +223,8 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
  * The XL710 does not support any of the ancillary features of the PHC
  * subsystem, so this function may just return.
  **/
-static int i40e_ptp_enable(struct ptp_clock_info *ptp,
-                          struct ptp_clock_request *rq, int on)
+static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
+                                  struct ptp_clock_request *rq, int on)
 {
        return -EOPNOTSUPP;
 }
@@ -315,6 +314,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
        skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(pf->ptp_tx_skb);
        pf->ptp_tx_skb = NULL;
+       clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
 }
 
 /**
@@ -423,28 +423,23 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
 }
 
 /**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
  * @pf: Board private structure
- * @ifreq: ioctl data
+ * @config: hwtstamp settings requested or saved
  *
- * Respond to the user filter requests and make the appropriate hardware
- * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
- * logic, so keep track in software of whether to indicate these timestamps
- * or not.
+ * Control hardware registers to enter the specific mode requested by the
+ * user. Also used during reset path to ensure that timestamp settings are
+ * maintained.
  *
- * It is permissible to "upgrade" the user request to a broader filter, as long
- * as the user receives the timestamps they care about and the user is notified
- * the filter has been broadened.
+ * Note: modifies config in place, and may update the requested mode to be
+ * more broad if the specific filter is not directly supported.
  **/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
+                                      struct hwtstamp_config *config)
 {
        struct i40e_hw *hw = &pf->hw;
-       struct hwtstamp_config *config = &pf->tstamp_config;
        u32 pf_id, tsyntype, regval;
 
-       if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
-               return -EFAULT;
-
        /* Reserved for future extensions. */
        if (config->flags)
                return -EINVAL;
@@ -452,8 +447,12 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
        /* Confirm that 1588 is supported on this PF. */
        pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
                I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
-       if (hw->pf_id != pf_id)
-               return -EINVAL;
+       if (hw->pf_id != pf_id) {
+               dev_err(&pf->pdev->dev,
+                       "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
+                       hw->pf_id, hw->port, pf_id);
+               return -EPERM;
+       }
 
        switch (config->tx_type) {
        case HWTSTAMP_TX_OFF:
@@ -535,23 +534,59 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
                wr32(hw, I40E_PRTTSYN_CTL1, regval);
        }
 
-       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+       return 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifreq: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       int err;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       err = i40e_ptp_set_timestamp_mode(pf, &config);
+       if (err)
+               return err;
+
+       /* save these settings for future reference */
+       pf->tstamp_config = config;
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
 }
 
 /**
- * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * i40e_ptp_create_clock - Create PTP clock device for userspace
  * @pf: Board private structure
  *
- * This function registers the device clock as a PHC. If it is successful, it
- * starts the clock in the hardware.
+ * This function creates a new PTP clock device. It only creates one if we
+ * don't already have one, so it is safe to call. Will return error if it
+ * can't create one, but success if we already have a device. Should be used
+ * by i40e_ptp_init to create clock initially, and prevent global resets from
+ * creating new clock devices.
  **/
-void i40e_ptp_init(struct i40e_pf *pf)
+static long i40e_ptp_create_clock(struct i40e_pf *pf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+       /* no need to create a clock device if we already have one */
+       if (!IS_ERR_OR_NULL(pf->ptp_clock))
+               return 0;
 
-       strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+       strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
        pf->ptp_caps.owner = THIS_MODULE;
        pf->ptp_caps.max_adj = 999999999;
        pf->ptp_caps.n_ext_ts = 0;
@@ -560,11 +595,46 @@ void i40e_ptp_init(struct i40e_pf *pf)
        pf->ptp_caps.adjtime = i40e_ptp_adjtime;
        pf->ptp_caps.gettime = i40e_ptp_gettime;
        pf->ptp_caps.settime = i40e_ptp_settime;
-       pf->ptp_caps.enable = i40e_ptp_enable;
+       pf->ptp_caps.enable = i40e_ptp_feature_enable;
 
        /* Attempt to register the clock before enabling the hardware. */
        pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
        if (IS_ERR(pf->ptp_clock)) {
+               return PTR_ERR(pf->ptp_clock);
+       }
+
+       /* clear the hwtstamp settings here during clock create, instead of
+        * during regular init, so that we can maintain settings across a
+        * reset or suspend.
+        */
+       pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+       pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+       return 0;
+}
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets device up for 1588 support. The first time it is run, it
+ * will create a PHC clock device. It does not create a clock device if one
+ * already exists. It also reconfigures the device after a reset.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+       struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+       struct i40e_hw *hw = &pf->hw;
+       long err;
+
+       /* we have to initialize the lock first, since we can't control
+        * when the user will enter the PHC device entry points
+        */
+       spin_lock_init(&pf->tmreg_lock);
+
+       /* ensure we have a clock device */
+       err = i40e_ptp_create_clock(pf);
+       if (err) {
                pf->ptp_clock = NULL;
                dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
                        __func__);
@@ -572,8 +642,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
                struct timespec ts;
                u32 regval;
 
-               spin_lock_init(&pf->tmreg_lock);
-
                dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
                         netdev->name);
                pf->flags |= I40E_FLAG_PTP;
@@ -589,8 +657,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
                /* Set the increment value per clock tick. */
                i40e_ptp_set_increment(pf);
 
-               /* reset the tstamp_config */
-               memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+               /* reset timestamping mode */
+               i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
                ts = ktime_to_timespec(ktime_get_real());
@@ -614,6 +682,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
+               clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
        }
 
        if (pf->ptp_clock) {
index 947de98500f33a4162c441967f6a6bacdd0ab148..65d3c8bb2d5b4f05eb4124ca61b041f0b7ae78aa 100644 (file)
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-
-#define I40E_PF_ARQBAH 0x00080180
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
 #define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
 #define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
 #define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
 #define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
 #define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
 #define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
 #define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
 #define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
 #define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAH_MAX_INDEX 127
 #define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAL_MAX_INDEX 127
 #define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQH_MAX_INDEX 127
 #define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQLEN_MAX_INDEX 127
 #define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQT_MAX_INDEX 127
 #define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAH_MAX_INDEX 127
 #define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAL_MAX_INDEX 127
 #define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQH_MAX_INDEX 127
 #define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQLEN_MAX_INDEX 127
 #define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQT_MAX_INDEX 127
 #define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
 #define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
 #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
 #define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
 #define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
 #define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
 #define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
 #define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
 #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
 #define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
 #define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
 #define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
 #define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
 #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
 #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
 #define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
 #define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
 #define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
 #define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
 #define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
 #define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
 #define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
 #define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
 #define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
 #define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
 #define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
 #define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
 #define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
 #define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TDPUC 0x00044100
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
 #define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
-#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
 #define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
 #define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
 #define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
 #define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
 #define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
 #define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
 #define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
 #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
 #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
 #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
 #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
 #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CCMD_MAX_INDEX 3
 #define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
 #define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
 #define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
 #define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
 #define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
 #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSCA_MAX_INDEX 3
 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
 #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
 #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
 #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_PE_ENA 0x000B81A0
-#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
-#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
 #define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
 #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
 #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
 #define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
 #define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
 #define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
 #define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
 #define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
 #define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
 #define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
 #define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
 #define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
 #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
 #define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
 #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000
-#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
-#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
 #define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
 #define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
 #define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
 #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
 #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
 #define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
 #define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFGEN_RSTAT1_MAX_INDEX 127
 #define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RSTAT_MAX_INDEX 383
 #define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RTRIG_MAX_INDEX 383
 #define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
 #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
 #define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
 #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
 #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
 #define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
 #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
 #define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
 #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_SDPART_MAX_INDEX 15
 #define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
 #define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
 #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
 #define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
 #define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
 #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
 #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
 #define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
 #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
 #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_UFUSE 0x00094008
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
 #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
 #define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
 #define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
 #define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
 #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
 #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
 #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_PFINT_CEQCTL_MAX_INDEX 511
 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
 #define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
 #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
 #define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
 #define I40E_PFINT_ITR0_MAX_INDEX 2
 #define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_ITRN_MAX_INDEX 2
 #define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_LNKLSTN_MAX_INDEX 511
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
 #define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
 #define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_RATEN_MAX_INDEX 511
 #define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_RQCTL_MAX_INDEX 1535
 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_TQCTL_MAX_INDEX 1535
 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
 #define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_MAX_INDEX 127
 #define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_ITR0_MAX_INDEX 2
 #define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPINT_AEQCTL_MAX_INDEX 127
 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_VPINT_CEQCTL_MAX_INDEX 511
 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLST0_MAX_INDEX 127
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLSTN_MAX_INDEX 511
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_RATE0_MAX_INDEX 127
 #define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
 #define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_RATEN_MAX_INDEX 511
 #define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
 #define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
 #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
 #define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
 #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-
-#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
 #define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QRX_TAIL_MAX_INDEX 1535
 #define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_CTL_MAX_INDEX 1535
 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
 #define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
 #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_ENA_MAX_INDEX 1535
 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
 #define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_HEAD_MAX_INDEX 1535
 #define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
 #define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_TAIL_MAX_INDEX 1535
 #define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_MAPENA_MAX_INDEX 127
 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_QTABLE_MAX_INDEX 15
 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QBASE_MAX_INDEX 383
 #define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QTABLE_MAX_INDEX 7
 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
 #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
 #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
 #define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
 #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HLCTLA 0x001E4760
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSECTL1 0x001E3560
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
 #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
-#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
 #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
 #define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
 #define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
 #define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
 #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
 #define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_METF_MAX_INDEX 3
 #define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
 #define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
 #define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
 #define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
 #define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
 #define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAH_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAL_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
 #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
 #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
 #define I40E_MSIX_PBA_MAX_INDEX 5
 #define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TADD_MAX_INDEX 128
 #define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TMSG_MAX_INDEX 128
 #define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TUADD_MAX_INDEX 128
 #define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TVCTRL_MAX_INDEX 128
 #define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG1_MAX_INDEX 639
 #define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
 #define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
 #define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
 #define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
 #define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
 #define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
 #define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
 #define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
 #define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
 #define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
 #define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
 #define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
 #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
 #define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
 #define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
 #define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
 #define I40E_GLNVM_PROTCSR_MAX_INDEX 59
 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
 #define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
 #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
 #define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
 #define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-
-#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
 #define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
 #define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
 #define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
 #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
 #define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
 #define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
 #define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
 #define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
 #define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
 #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
 #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
 #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
 #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PCITEST2 0x000BE4BC
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
-
-#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
 #define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
 #define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
 #define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
 #define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
 #define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
 #define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SUBSYSID 0x000BE48C
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
-#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
 #define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
 #define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
 #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
 #define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
 #define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
 #define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
 #define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
 #define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
 #define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
 #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
 #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
 #define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
 #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
 #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
 #define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PFDEVID 0x000BE080
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
 #define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
 #define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_VFDEVID 0x000BE100
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
 #define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
 #define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-
-#define I40E_PFPE_MRTEIDXMASK 0x00008600
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
 #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
 #define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
 #define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
 #define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
 #define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
 #define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
 #define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
 #define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
 #define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
 #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
 #define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
 #define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
 #define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
 #define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DHW_MAX_INDEX 7
 #define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DLW_MAX_INDEX 7
 #define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DPS_MAX_INDEX 7
 #define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SHT_MAX_INDEX 7
 #define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
 #define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SLT_MAX_INDEX 7
 #define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
 #define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
 #define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
 #define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
 #define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
 #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
 #define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
 #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
 #define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
 #define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
 #define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
 #define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
 #define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_HSYM_MAX_INDEX 63
 #define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_GLQF_PCNT_MAX_INDEX 511
 #define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_SWAP_MAX_INDEX 1
 #define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
 #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
 #define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
 #define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
 #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_PFQF_HENA_MAX_INDEX 1
 #define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_PFQF_HKEY_MAX_INDEX 12
 #define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
 #define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
 #define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
 #define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_PFQF_HLUT_MAX_INDEX 127
 #define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
 #define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
 #define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
 #define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
 #define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
 #define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
 #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
 #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HENA1_MAX_INDEX 1
 #define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
 #define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
 #define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
 #define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
 #define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
 #define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION1_MAX_INDEX 7
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
 #define I40E_VPQF_CTL_MAX_INDEX 127
 #define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
 #define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_CTL_MAX_INDEX 383
 #define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_TCREGION_MAX_INDEX 3
 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOECRC_MAX_INDEX 143
 #define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDDPC_MAX_INDEX 143
 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-/* _i=0...143 */
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXVC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOELAST_MAX_INDEX 143
 #define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPRC_MAX_INDEX 143
 #define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPTC_MAX_INDEX 143
 #define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOERPDC_MAX_INDEX 143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
 #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
 #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
 #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
 #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCH_MAX_INDEX 3
 #define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCL_MAX_INDEX 3
 #define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCH_MAX_INDEX 3
 #define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCL_MAX_INDEX 3
 #define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ILLERRC_MAX_INDEX 3
 #define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LDPC_MAX_INDEX 3
 #define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MLFC_MAX_INDEX 3
 #define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCH_MAX_INDEX 3
 #define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCL_MAX_INDEX 3
 #define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCH_MAX_INDEX 3
 #define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCL_MAX_INDEX 3
 #define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MRFC_MAX_INDEX 3
 #define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127H_MAX_INDEX 3
 #define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127L_MAX_INDEX 3
 #define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255H_MAX_INDEX 3
 #define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255L_MAX_INDEX 3
 #define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511H_MAX_INDEX 3
 #define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511L_MAX_INDEX 3
 #define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64H_MAX_INDEX 3
 #define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64L_MAX_INDEX 3
 #define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127H_MAX_INDEX 3
 #define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127L_MAX_INDEX 3
 #define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255H_MAX_INDEX 3
 #define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255L_MAX_INDEX 3
 #define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511H_MAX_INDEX 3
 #define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511L_MAX_INDEX 3
 #define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64H_MAX_INDEX 3
 #define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64L_MAX_INDEX 3
 #define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RDPC_MAX_INDEX 3
 #define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RFC_MAX_INDEX 3
 #define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RJC_MAX_INDEX 3
 #define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RLEC_MAX_INDEX 3
 #define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ROC_MAX_INDEX 3
 #define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUC_MAX_INDEX 3
 #define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUPP_MAX_INDEX 3
 #define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
 #define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
-#define I40E_GLPRT_STDC_MAX_INDEX 3
-#define I40E_GLPRT_STDC_STDC_SHIFT 0
-#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDPC_MAX_INDEX 3
 #define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCL_MAX_INDEX 3
 #define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCH_MAX_INDEX 3
 #define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCL_MAX_INDEX 3
 #define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCH_MAX_INDEX 15
 #define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCL_MAX_INDEX 15
 #define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCH_MAX_INDEX 15
 #define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCL_MAX_INDEX 15
 #define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCH_MAX_INDEX 15
 #define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCL_MAX_INDEX 15
 #define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCH_MAX_INDEX 15
 #define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCL_MAX_INDEX 15
 #define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCH_MAX_INDEX 15
 #define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCL_MAX_INDEX 15
 #define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCH_MAX_INDEX 15
 #define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCL_MAX_INDEX 15
 #define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_RUPP_MAX_INDEX 15
 #define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_TDPC_MAX_INDEX 15
 #define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCH_MAX_INDEX 15
 #define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCL_MAX_INDEX 15
 #define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCH_MAX_INDEX 15
 #define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCL_MAX_INDEX 15
 #define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCH_MAX_INDEX 383
 #define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCL_MAX_INDEX 383
 #define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCH_MAX_INDEX 383
 #define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCL_MAX_INDEX 383
 #define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCH_MAX_INDEX 383
 #define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCL_MAX_INDEX 383
 #define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCH_MAX_INDEX 383
 #define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCL_MAX_INDEX 383
 #define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCH_MAX_INDEX 383
 #define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCL_MAX_INDEX 383
 #define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCH_MAX_INDEX 383
 #define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCL_MAX_INDEX 383
 #define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RDPC_MAX_INDEX 383
 #define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCH_MAX_INDEX 383
 #define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCL_MAX_INDEX 383
 #define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCH_MAX_INDEX 383
 #define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCL_MAX_INDEX 383
 #define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
 #define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
 #define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
 #define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRT_MSCCNT 0x00256BA0
-#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
-#define I40E_PRT_SCSTS 0x00256C20
-#define I40E_PRT_SCSTS_BSCA_SHIFT 0
-#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
-#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
-#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
-#define I40E_PRT_SCSTS_MSCA_SHIFT 2
-#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
-#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
-#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
-#define I40E_PRT_SWT_BSCCNT 0x00256C60
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
 #define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
 #define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
 #define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
 #define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_CLKO_MAX_INDEX 1
 #define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
 #define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
 #define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
 #define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
 #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
 #define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
 #define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480
-#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 8
-#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
 #define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
 #define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
 #define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
 #define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_TX_MAX_INDEX 127
 #define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
 #define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
 #define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
 #define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
 #define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
 #define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
 #define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
 #define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
 #define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
 #define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
 #define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
 #define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
 #define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
 #define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
 #define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
 #define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
 #define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
 #define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
 #define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
 #define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
 #define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
 #define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
 #define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
 #define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
 #define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAH_MAX_INDEX 3
 #define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
 #define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
 #define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
 #define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAL_MAX_INDEX 3
 #define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
 #define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
 #define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 #define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
 #define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
 #define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
 #define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
 #define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
 #define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
 #define I40E_VFINT_ITR01_MAX_INDEX 2
 #define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_QRX_TAIL1_MAX_INDEX 15
 #define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
 #define I40E_QTX_TAIL1_MAX_INDEX 15
 #define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD_MAX_INDEX 16
 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG_MAX_INDEX 16
 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD_MAX_INDEX 16
 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_VFQF_HENA_MAX_INDEX 1
 #define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY_MAX_INDEX 12
 #define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
 #define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
 #define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
 #define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
 #define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION_MAX_INDEX 7
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 #endif
index e49f31dbd5d87cfdd4268c8837053746ef27c3dc..989866af26e5edeaa271ee693bb3286331e27a10 100644 (file)
@@ -39,6 +39,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
 }
 
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_FD_CLEAN_DELAY 10
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_data: Packet data that will be filter parameters
@@ -50,7 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                             struct i40e_pf *pf, bool add)
 {
        struct i40e_filter_program_desc *fdir_desc;
-       struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_buffer *tx_buf, *first;
        struct i40e_tx_desc *tx_desc;
        struct i40e_ring *tx_ring;
        unsigned int fpt, dcc;
@@ -58,6 +59,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        struct device *dev;
        dma_addr_t dma;
        u32 td_cmd = 0;
+       u16 delay = 0;
        u16 i;
 
        /* find existing FDIR VSI */
@@ -71,6 +73,17 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_ring = vsi->tx_rings[0];
        dev = tx_ring->dev;
 
+       /* we need two descriptors to add/del a filter and we can wait */
+       do {
+               if (I40E_DESC_UNUSED(tx_ring) > 1)
+                       break;
+               msleep_interruptible(1);
+               delay++;
+       } while (delay < I40E_FD_CLEAN_DELAY);
+
+       if (!(I40E_DESC_UNUSED(tx_ring) > 1))
+               return -EAGAIN;
+
        dma = dma_map_single(dev, raw_packet,
                             I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
@@ -79,8 +92,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        /* grab the next descriptor */
        i = tx_ring->next_to_use;
        fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       first = &tx_ring->tx_bi[i];
+       memset(first, 0, sizeof(struct i40e_tx_buffer));
 
-       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
 
        fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
              I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -100,8 +115,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
                       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
-
        dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
        if (add)
@@ -124,6 +137,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
        }
 
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+       fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
@@ -132,7 +147,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc = I40E_TX_DESC(tx_ring, i);
        tx_buf = &tx_ring->tx_bi[i];
 
-       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
+
+       memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
 
        /* record length, and DMA address */
        dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
@@ -141,6 +158,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc->buffer_addr = cpu_to_le64(dma);
        td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
+       tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
+       tx_buf->raw_buf = (void *)raw_packet;
+
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
@@ -148,14 +168,12 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_buf->time_stamp = jiffies;
 
        /* Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
+        * know there are new descriptors to fetch.
         */
        wmb();
 
        /* Mark the data descriptor to be watched */
-       tx_buf->next_to_watch = tx_desc;
+       first->next_to_watch = tx_desc;
 
        writel(tx_ring->next_to_use, tx_ring->tail);
        return 0;
@@ -170,24 +188,27 @@ dma_fail:
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                                   struct i40e_fdir_filter *fd_data,
-                                  u8 *raw_packet, bool add)
+                                  bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct udphdr *udp;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
        memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
 
        ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
@@ -220,19 +241,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                   struct i40e_fdir_filter *fd_data,
-                                  u8 *raw_packet, bool add)
+                                  bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct tcphdr *tcp;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        /* Dummy packet */
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -240,6 +261,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
                0x0, 0x72, 0, 0, 0, 0};
 
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
        memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
 
        ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
@@ -271,19 +295,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                         fd_data->pctype, ret);
        }
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-
-       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-       if (ret) {
-               dev_info(&pf->pdev->dev,
-                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                        fd_data->pctype, ret);
-               err = true;
-       } else {
-               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
-                         fd_data->pctype, ret);
-       }
-
        return err ? -EOPNOTSUPP : 0;
 }
 
@@ -292,14 +303,13 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
  * a specific flow spec
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Always returns -EOPNOTSUPP
  **/
 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
                                    struct i40e_fdir_filter *fd_data,
-                                   u8 *raw_packet, bool add)
+                                   bool add)
 {
        return -EOPNOTSUPP;
 }
@@ -310,33 +320,36 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
  * a specific flow spec
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                                  struct i40e_fdir_filter *fd_data,
-                                 u8 *raw_packet, bool add)
+                                 bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        int i;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0};
 
-       memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
-       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
-
-       ip->saddr = fd_data->src_ip[0];
-       ip->daddr = fd_data->dst_ip[0];
-       ip->protocol = 0;
-
        for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
             i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+               raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+               if (!raw_packet)
+                       return -ENOMEM;
+               memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+               ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+               ip->saddr = fd_data->src_ip[0];
+               ip->daddr = fd_data->dst_ip[0];
+               ip->protocol = 0;
+
                fd_data->pctype = i;
                ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 
@@ -366,50 +379,34 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add)
 {
        struct i40e_pf *pf = vsi->back;
-       u8 *raw_packet;
        int ret;
 
-       /* Populate the Flow Director that we have at the moment
-        * and allocate the raw packet buffer for the calling functions
-        */
-       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
-       if (!raw_packet)
-               return -ENOMEM;
-
        switch (input->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
-               ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
-                                             add);
+               ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                break;
        case UDP_V4_FLOW:
-               ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
-                                             add);
+               ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                break;
        case SCTP_V4_FLOW:
-               ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
-                                              add);
+               ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
                break;
        case IPV4_FLOW:
-               ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
-                                            add);
+               ret = i40e_add_del_fdir_ipv4(vsi, input, add);
                break;
        case IP_USER_FLOW:
                switch (input->ip4_proto) {
                case IPPROTO_TCP:
-                       ret = i40e_add_del_fdir_tcpv4(vsi, input,
-                                                     raw_packet, add);
+                       ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                        break;
                case IPPROTO_UDP:
-                       ret = i40e_add_del_fdir_udpv4(vsi, input,
-                                                     raw_packet, add);
+                       ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                        break;
                case IPPROTO_SCTP:
-                       ret = i40e_add_del_fdir_sctpv4(vsi, input,
-                                                      raw_packet, add);
+                       ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
                        break;
                default:
-                       ret = i40e_add_del_fdir_ipv4(vsi, input,
-                                                    raw_packet, add);
+                       ret = i40e_add_del_fdir_ipv4(vsi, input, add);
                        break;
                }
                break;
@@ -419,7 +416,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                ret = -EINVAL;
        }
 
-       kfree(raw_packet);
+       /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
        return ret;
 }
 
@@ -450,22 +447,24 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                         rx_desc->wb.qword0.hi_dword.fd_id);
 
                /* filter programming failed most likely due to table full */
-               fcnt_prog = i40e_get_current_fd_count(pf);
-               fcnt_avail = i40e_get_fd_cnt_all(pf);
+               fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+               fcnt_avail = pf->fdir_pf_filter_count;
                /* If ATR is running fcnt_prog can quickly change,
                 * if we are very close to full, it makes sense to disable
                 * FD ATR/SB and then re-enable it when there is room.
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        /* Turn off ATR first */
-                       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                           !(pf->auto_disable_flags &
+                             I40E_FLAG_FD_ATR_ENABLED)) {
                                dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
                                pf->auto_disable_flags |=
                                                       I40E_FLAG_FD_ATR_ENABLED;
                                pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
-                       } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-                               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+                                  !(pf->auto_disable_flags &
+                                    I40E_FLAG_FD_SB_ENABLED)) {
                                dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
@@ -491,7 +490,11 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
+               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buffer->raw_buf);
+               else
+                       dev_kfree_skb_any(tx_buffer->skb);
+
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
@@ -1701,7 +1704,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+       fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+       fdir_desc->fd_id = cpu_to_le32(0);
 }
 
 /**
@@ -1850,7 +1855,8 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
         * we are not already transmitting a packet to be timestamped
         */
        pf = i40e_netdev_to_pf(tx_ring->netdev);
-       if (pf->ptp_tx && !pf->ptp_tx_skb) {
+       if (pf->ptp_tx &&
+           !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                pf->ptp_tx_skb = skb_get(skb);
        } else {
@@ -2000,6 +2006,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+       context_desc->rsvd = cpu_to_le16(0);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
@@ -2278,13 +2285,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       skb_tx_timestamp(skb);
-
        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
 
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
+       skb_tx_timestamp(skb);
+
        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
index 0277894fe1c46db030045d819484ccc6f2cb6f72..c1c356984b170cbefa5763202c60a0727cb870a1 100644 (file)
@@ -75,7 +75,6 @@ enum i40e_dyn_idx_t {
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
@@ -132,6 +131,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
+#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -140,7 +140,10 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        unsigned long time_stamp;
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *raw_buf;
+       };
        unsigned int bytecount;
        unsigned short gso_segs;
        DEFINE_DMA_UNMAP_ADDR(dma);
index 9d39ff23c5fbfbbed52ec0ba5437e585fc3a0c68..1fcf2205ffe619228af502c915c748812e34a774 100644 (file)
@@ -50,6 +50,9 @@
                                         (d) == I40E_DEV_ID_QSFP_B  || \
                                         (d) == I40E_DEV_ID_QSFP_C)
 
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
 #define I40E_MAX_CHAINED_RX_BUFFERS    5
@@ -137,6 +140,14 @@ enum i40e_fc_mode {
        I40E_FC_DEFAULT
 };
 
+enum i40e_set_fc_aq_failures {
+       I40E_SET_FC_AQ_FAIL_NONE = 0,
+       I40E_SET_FC_AQ_FAIL_GET = 1,
+       I40E_SET_FC_AQ_FAIL_SET = 2,
+       I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+       I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
 enum i40e_vsi_type {
        I40E_VSI_MAIN = 0,
        I40E_VSI_VMDQ1,
@@ -163,6 +174,7 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
+       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
@@ -234,6 +246,7 @@ struct i40e_mac_info {
        u8 addr[ETH_ALEN];
        u8 perm_addr[ETH_ALEN];
        u8 san_addr[ETH_ALEN];
+       u8 port_addr[ETH_ALEN];
        u16 max_fcoeq;
 };
 
@@ -875,7 +888,6 @@ enum i40e_filter_pctype {
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
        /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
-       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -1162,4 +1174,7 @@ enum i40e_reset_type {
        I40E_RESET_GLOBR        = 2,
        I40E_RESET_EMPR         = 3,
 };
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 #endif /* _I40E_TYPE_H_ */
index f5b9d20625736b5dc451ac32dc25cc38d7a0d99c..cafda0cfc1a96bb432bbd35cdcbceded7f1e6187 100644 (file)
@@ -347,10 +347,6 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        rx_ctx.dsize = 1;
 
        /* default values */
-       rx_ctx.tphrdesc_ena = 1;
-       rx_ctx.tphwdesc_ena = 1;
-       rx_ctx.tphdata_ena = 1;
-       rx_ctx.tphhead_ena = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.prefena = 1;
@@ -2077,6 +2073,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
        ether_addr_copy(vf->default_lan_addr.addr, mac);
        vf->pf_set_mac = true;
+       /* Force the VF driver stop so it has to reload with new MAC address */
+       i40e_vc_disable_vf(pf, vf);
        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
        ret = 0;
 
index eb67cce3e8f9a1bed6a0eabfaaeb7067acd703ef..8330744b02f10c3e39a10c64f6c201fb18047d66 100644 (file)
@@ -53,16 +53,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
+               hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+               hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
+               hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+               hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
+               hw->aq.asq.bal  = I40E_PF_ATQBAL;
+               hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
+               hw->aq.arq.bal  = I40E_PF_ARQBAL;
+               hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
 }
 
@@ -294,27 +302,18 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the transmit queue */
-               wr32(hw, I40E_VF_ATQBAH1,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQBAL1,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-                                         I40E_VF_ATQLEN1_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ATQBAL1);
-       } else {
-               /* configure the transmit queue */
-               wr32(hw, I40E_PF_ATQBAH,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQBAL,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-                                         I40E_PF_ATQLEN_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ATQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+                                 I40E_PF_ATQLEN_ATQENABLE_MASK));
+       wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+       wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -332,30 +331,21 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the receive queue */
-               wr32(hw, I40E_VF_ARQBAH1,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQBAL1,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-                                         I40E_VF_ARQLEN1_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ARQBAL1);
-       } else {
-               /* configure the receive queue */
-               wr32(hw, I40E_PF_ARQBAH,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQBAL,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-                                         I40E_PF_ARQLEN_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ARQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+                                 I40E_PF_ARQLEN_ARQENABLE_MASK));
+       wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+       wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -497,6 +487,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
+       wr32(hw, hw->aq.asq.bal, 0);
+       wr32(hw, hw->aq.asq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.asq_mutex);
@@ -528,6 +520,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
+       wr32(hw, hw->aq.arq.bal, 0);
+       wr32(hw, hw->aq.arq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.arq_mutex);
@@ -573,6 +567,9 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
        /* Set up register offsets */
        i40e_adminq_init_regs(hw);
 
+       /* setup ASQ command write back timeout */
+       hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
@@ -630,6 +627,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "%s: ntc %d head %d.\n", __func__, ntc,
+                          rd32(hw, hw->aq.asq.head));
+
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
@@ -690,6 +691,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
+       u32  val = 0;
+
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: head overrun at %d\n", val);
+               status = I40E_ERR_QUEUE_EMPTY;
+               goto asq_send_command_exit;
+       }
 
        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -783,6 +793,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        }
 
        /* bump the tail */
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -806,7 +817,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                        /* ugh! delay while spin_lock */
                        udelay(delay_len);
                        total_delay += delay_len;
-               } while (total_delay <  I40E_ASQ_CMD_TIMEOUT);
+               } while (total_delay < hw->aq.asq_cmd_timeout);
        }
 
        /* if ready, copy the desc back to temp */
@@ -820,6 +831,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);
+
                        /* strip off FW internal code */
                        retval &= 0xff;
                }
@@ -834,6 +846,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        if (i40e_is_nvm_update_op(desc))
                hw->aq.nvm_busy = true;
 
+       if (le16_to_cpu(desc->datalen) == buff_size) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: desc and buffer writeback:\n");
+               i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+       }
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -905,10 +923,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
-       i40evf_debug_aq(hw,
-                     I40E_DEBUG_AQ_COMMAND,
-                     (void *)desc,
-                     hw->aq.arq.r.arq_bi[desc_idx].va);
 
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
@@ -931,6 +945,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
        if (i40e_is_nvm_update_op(&e->desc))
                hw->aq.nvm_busy = false;
 
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+       i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
index e3472c62e1554194740a9233aadedaa5f13077e3..162845589bf7ab1673a2c87b37bd47499d1e7c4f 100644 (file)
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
        u32 head;
        u32 tail;
        u32 len;
+       u32 bah;
+       u32 bal;
 };
 
 /* ASQ transaction details */
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
        struct i40e_adminq_ring arq;    /* receive queue */
        struct i40e_adminq_ring asq;    /* send queue */
+       u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
        u16 num_arq_entries;            /* receive queue depth */
        u16 num_asq_entries;            /* send queue depth */
        u16 arq_buf_size;               /* receive queue buffer size */
index a43155afdbe24784d750f5fa8878e57a100913c8..4ea90bf239bb204ed5603b66680413191f46bf66 100644 (file)
@@ -551,6 +551,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
+       struct i40e_asq_cmd_details details;
        i40e_status status;
 
        i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
@@ -565,7 +566,6 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
                desc.datalen = cpu_to_le16(msglen);
        }
        if (!cmd_details) {
-               struct i40e_asq_cmd_details details;
                memset(&details, 0, sizeof(details));
                details.async = true;
                cmd_details = &details;
index a2ad9a4e399da70707a9ed984588a8c2cde04686..931c880443003d7aa3a1787ddd1b5f27241236cd 100644 (file)
@@ -127,7 +127,7 @@ struct i40e_hmc_info {
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
                (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +146,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
index d6f762241537804be777b97ad5f29616f990058f..a5d79877354cc6be02cd7777ffd1577b468f9cc3 100644 (file)
@@ -32,16 +32,22 @@ struct i40e_hw;
 
 /* HMC element context information */
 
-/* Rx queue context data */
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
 struct i40e_hmc_obj_rxq {
        u16 head;
-       u8  cpuid;
+       u16 cpuid; /* bigger than needed, see above for reason */
        u64 base;
        u16 qlen;
 #define I40E_RXQ_CTX_DBUFF_SHIFT 7
-       u8  dbuff;
+       u16 dbuff; /* bigger than needed, see above for reason */
 #define I40E_RXQ_CTX_HBUFF_SHIFT 6
-       u8  hbuff;
+       u16 hbuff; /* bigger than needed, see above for reason */
        u8  dtype;
        u8  dsize;
        u8  crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
        u8  hsplit_0;
        u8  hsplit_1;
        u8  showiv;
-       u16 rxmax;
+       u32 rxmax; /* bigger than needed, see above for reason */
        u8  tphrdesc_ena;
        u8  tphwdesc_ena;
        u8  tphdata_ena;
        u8  tphhead_ena;
-       u8  lrxqthresh;
+       u16 lrxqthresh; /* bigger than needed, see above for reason */
        u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
-/* Tx queue context data */
+/* Tx queue context data
+*
+* The sizes of the variables may be larger than needed due to crossing byte
+* boundaries. If we do not have the width of the variable set to the correct
+* size then we could end up shifting bits off the top of the variable when the
+* variable is at the top of a byte and crosses over into the next byte.
+*/
 struct i40e_hmc_obj_txq {
        u16 head;
        u8  new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
        u8  fd_ena;
        u8  alt_vlan_ena;
        u16 thead_wb;
-       u16 cpuid;
+       u8  cpuid;
        u8  head_wb_ena;
        u16 qlen;
        u8  tphrdesc_ena;
index 3698396558186f67736fae33294b9993ccf8abad..c1f6a59bfea017cb9bcd064f238d5d08a415fb30 100644 (file)
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-
-#define I40E_PF_ARQBAH 0x00080180
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
 #define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
 #define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
 #define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
 #define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
 #define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
 #define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
 #define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
 #define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
 #define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAH_MAX_INDEX 127
 #define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAL_MAX_INDEX 127
 #define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQH_MAX_INDEX 127
 #define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQLEN_MAX_INDEX 127
 #define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQT_MAX_INDEX 127
 #define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAH_MAX_INDEX 127
 #define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAL_MAX_INDEX 127
 #define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQH_MAX_INDEX 127
 #define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQLEN_MAX_INDEX 127
 #define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQT_MAX_INDEX 127
 #define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
 #define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
 #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
 #define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
 #define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
 #define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
 #define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
 #define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
 #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
 #define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
 #define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
 #define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
 #define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
 #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
 #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
 #define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
 #define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
 #define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
 #define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
 #define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
 #define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
 #define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
 #define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
 #define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
 #define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
 #define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
 #define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
 #define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
 #define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TDPUC 0x00044100
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
 #define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
-#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
 #define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
 #define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
 #define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
 #define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
 #define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
 #define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
 #define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
 #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
 #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
 #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
 #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
 #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CCMD_MAX_INDEX 3
 #define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
 #define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
 #define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
 #define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
 #define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
 #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSCA_MAX_INDEX 3
 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
 #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
 #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
 #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_PE_ENA 0x000B81A0
-#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
-#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
 #define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
 #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
 #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
 #define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
 #define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
 #define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
 #define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
 #define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
 #define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
 #define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
 #define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
 #define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
 #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
 #define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
 #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000
-#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
-#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
 #define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
 #define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
 #define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
 #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
 #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
 #define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
 #define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFGEN_RSTAT1_MAX_INDEX 127
 #define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RSTAT_MAX_INDEX 383
 #define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RTRIG_MAX_INDEX 383
 #define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
 #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
 #define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
 #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
 #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
 #define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
 #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
 #define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
 #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_SDPART_MAX_INDEX 15
 #define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
 #define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
 #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
 #define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
 #define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
 #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
 #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
 #define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
 #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
 #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_UFUSE 0x00094008
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
 #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
 #define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
 #define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
 #define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
 #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
 #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
 #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_PFINT_CEQCTL_MAX_INDEX 511
 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
 #define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
 #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
 #define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
 #define I40E_PFINT_ITR0_MAX_INDEX 2
 #define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_ITRN_MAX_INDEX 2
 #define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_LNKLSTN_MAX_INDEX 511
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
 #define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
 #define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_RATEN_MAX_INDEX 511
 #define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_RQCTL_MAX_INDEX 1535
 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_TQCTL_MAX_INDEX 1535
 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
 #define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_MAX_INDEX 127
 #define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_ITR0_MAX_INDEX 2
 #define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPINT_AEQCTL_MAX_INDEX 127
 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_VPINT_CEQCTL_MAX_INDEX 511
 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLST0_MAX_INDEX 127
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLSTN_MAX_INDEX 511
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_RATE0_MAX_INDEX 127
 #define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
 #define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_RATEN_MAX_INDEX 511
 #define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
 #define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
 #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
 #define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
 #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-
-#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
 #define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QRX_TAIL_MAX_INDEX 1535
 #define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_CTL_MAX_INDEX 1535
 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
 #define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
 #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_ENA_MAX_INDEX 1535
 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
 #define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_HEAD_MAX_INDEX 1535
 #define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
 #define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_TAIL_MAX_INDEX 1535
 #define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_MAPENA_MAX_INDEX 127
 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_QTABLE_MAX_INDEX 15
 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QBASE_MAX_INDEX 383
 #define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QTABLE_MAX_INDEX 7
 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
 #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
 #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
 #define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
 #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HLCTLA 0x001E4760
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSECTL1 0x001E3560
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
 #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
-#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
 #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
 #define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
 #define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
 #define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
 #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
 #define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_METF_MAX_INDEX 3
 #define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
 #define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
 #define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
 #define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
 #define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
 #define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAH_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAL_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
 #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
 #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
 #define I40E_MSIX_PBA_MAX_INDEX 5
 #define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TADD_MAX_INDEX 128
 #define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TMSG_MAX_INDEX 128
 #define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TUADD_MAX_INDEX 128
 #define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TVCTRL_MAX_INDEX 128
 #define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG1_MAX_INDEX 639
 #define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
 #define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
 #define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
 #define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
 #define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
 #define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
 #define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
 #define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
 #define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
 #define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
 #define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
 #define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
 #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
 #define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
 #define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
 #define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
 #define I40E_GLNVM_PROTCSR_MAX_INDEX 59
 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
 #define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
 #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
 #define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
 #define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-
-#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
 #define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
 #define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
 #define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
 #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
 #define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
 #define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
 #define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
 #define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
 #define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
 #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
 #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
 #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
 #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PCITEST2 0x000BE4BC
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
-
-#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
 #define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
 #define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
 #define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
 #define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
 #define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
 #define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SUBSYSID 0x000BE48C
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
-#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
 #define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
 #define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
 #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
 #define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
 #define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
 #define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
 #define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
 #define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
 #define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
 #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
 #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
 #define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
 #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
 #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
 #define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PFDEVID 0x000BE080
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
 #define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
 #define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_VFDEVID 0x000BE100
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
 #define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
 #define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-
-#define I40E_PFPE_MRTEIDXMASK 0x00008600
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
 #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
 #define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
 #define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
 #define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
 #define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
 #define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
 #define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
 #define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
 #define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
 #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
 #define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
 #define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
 #define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
 #define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DHW_MAX_INDEX 7
 #define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DLW_MAX_INDEX 7
 #define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DPS_MAX_INDEX 7
 #define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SHT_MAX_INDEX 7
 #define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
 #define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SLT_MAX_INDEX 7
 #define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
 #define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
 #define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
 #define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
 #define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
 #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
 #define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
 #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
 #define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
 #define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
 #define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
 #define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
 #define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_HSYM_MAX_INDEX 63
 #define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_GLQF_PCNT_MAX_INDEX 511
 #define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_SWAP_MAX_INDEX 1
 #define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
 #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
 #define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
 #define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
 #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_PFQF_HENA_MAX_INDEX 1
 #define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_PFQF_HKEY_MAX_INDEX 12
 #define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
 #define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
 #define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
 #define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_PFQF_HLUT_MAX_INDEX 127
 #define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
 #define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
 #define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
 #define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
 #define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
 #define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
 #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
 #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HENA1_MAX_INDEX 1
 #define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
 #define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
 #define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
 #define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
 #define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
 #define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION1_MAX_INDEX 7
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPQF_CTL_MAX_INDEX 127
 #define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
 #define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_CTL_MAX_INDEX 383
 #define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_TCREGION_MAX_INDEX 3
 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOECRC_MAX_INDEX 143
 #define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDDPC_MAX_INDEX 143
 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-/* _i=0...143 */
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXVC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOELAST_MAX_INDEX 143
 #define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPRC_MAX_INDEX 143
 #define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPTC_MAX_INDEX 143
 #define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOERPDC_MAX_INDEX 143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
 #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
 #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
 #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
 #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCH_MAX_INDEX 3
 #define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCL_MAX_INDEX 3
 #define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCH_MAX_INDEX 3
 #define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCL_MAX_INDEX 3
 #define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ILLERRC_MAX_INDEX 3
 #define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LDPC_MAX_INDEX 3
 #define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MLFC_MAX_INDEX 3
 #define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCH_MAX_INDEX 3
 #define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCL_MAX_INDEX 3
 #define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCH_MAX_INDEX 3
 #define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCL_MAX_INDEX 3
 #define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MRFC_MAX_INDEX 3
 #define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127H_MAX_INDEX 3
 #define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127L_MAX_INDEX 3
 #define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255H_MAX_INDEX 3
 #define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255L_MAX_INDEX 3
 #define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511H_MAX_INDEX 3
 #define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511L_MAX_INDEX 3
 #define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64H_MAX_INDEX 3
 #define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64L_MAX_INDEX 3
 #define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127H_MAX_INDEX 3
 #define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127L_MAX_INDEX 3
 #define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255H_MAX_INDEX 3
 #define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255L_MAX_INDEX 3
 #define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511H_MAX_INDEX 3
 #define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511L_MAX_INDEX 3
 #define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64H_MAX_INDEX 3
 #define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64L_MAX_INDEX 3
 #define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RDPC_MAX_INDEX 3
 #define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RFC_MAX_INDEX 3
 #define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RJC_MAX_INDEX 3
 #define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RLEC_MAX_INDEX 3
 #define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ROC_MAX_INDEX 3
 #define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUC_MAX_INDEX 3
 #define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUPP_MAX_INDEX 3
 #define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
 #define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
-#define I40E_GLPRT_STDC_MAX_INDEX 3
-#define I40E_GLPRT_STDC_STDC_SHIFT 0
-#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDPC_MAX_INDEX 3
 #define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCL_MAX_INDEX 3
 #define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCH_MAX_INDEX 3
 #define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCL_MAX_INDEX 3
 #define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCH_MAX_INDEX 15
 #define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCL_MAX_INDEX 15
 #define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCH_MAX_INDEX 15
 #define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCL_MAX_INDEX 15
 #define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCH_MAX_INDEX 15
 #define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCL_MAX_INDEX 15
 #define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCH_MAX_INDEX 15
 #define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCL_MAX_INDEX 15
 #define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCH_MAX_INDEX 15
 #define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCL_MAX_INDEX 15
 #define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCH_MAX_INDEX 15
 #define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCL_MAX_INDEX 15
 #define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_RUPP_MAX_INDEX 15
 #define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_TDPC_MAX_INDEX 15
 #define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCH_MAX_INDEX 15
 #define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCL_MAX_INDEX 15
 #define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCH_MAX_INDEX 15
 #define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCL_MAX_INDEX 15
 #define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCH_MAX_INDEX 383
 #define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCL_MAX_INDEX 383
 #define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCH_MAX_INDEX 383
 #define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCL_MAX_INDEX 383
 #define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCH_MAX_INDEX 383
 #define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCL_MAX_INDEX 383
 #define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCH_MAX_INDEX 383
 #define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCL_MAX_INDEX 383
 #define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCH_MAX_INDEX 383
 #define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCL_MAX_INDEX 383
 #define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCH_MAX_INDEX 383
 #define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCL_MAX_INDEX 383
 #define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RDPC_MAX_INDEX 383
 #define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCH_MAX_INDEX 383
 #define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCL_MAX_INDEX 383
 #define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCH_MAX_INDEX 383
 #define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCL_MAX_INDEX 383
 #define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
 #define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
 #define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
 #define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRT_MSCCNT 0x00256BA0
-#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
-#define I40E_PRT_SCSTS 0x00256C20
-#define I40E_PRT_SCSTS_BSCA_SHIFT 0
-#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
-#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
-#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
-#define I40E_PRT_SCSTS_MSCA_SHIFT 2
-#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
-#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
-#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
-#define I40E_PRT_SWT_BSCCNT 0x00256C60
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
 #define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
 #define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
 #define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
 #define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_CLKO_MAX_INDEX 1
 #define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
 #define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
 #define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
 #define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
 #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
 #define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
 #define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480
-#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 8
-#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
 #define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
 #define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
 #define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
 #define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_TX_MAX_INDEX 127
 #define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
 #define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
 #define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
 #define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
 #define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
 #define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
 #define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
 #define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
 #define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
 #define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
 #define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
 #define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
 #define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
 #define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
 #define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
 #define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
 #define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
 #define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
 #define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
 #define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
 #define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
 #define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
 #define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
 #define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
 #define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAH_MAX_INDEX 3
 #define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
 #define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
 #define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
 #define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAL_MAX_INDEX 3
 #define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
 #define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
 #define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 #define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
 #define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
 #define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
 #define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
 #define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
 #define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
 #define I40E_VFINT_ITR01_MAX_INDEX 2
 #define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_QRX_TAIL1_MAX_INDEX 15
 #define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
 #define I40E_QTX_TAIL1_MAX_INDEX 15
 #define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD_MAX_INDEX 16
 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG_MAX_INDEX 16
 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD_MAX_INDEX 16
 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_VFQF_HENA_MAX_INDEX 1
 #define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY_MAX_INDEX 12
 #define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
 #define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
 #define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
 #define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
 #define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION_MAX_INDEX 7
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 #endif
index 48ebb6cd69f28608b73fb0e22ff5b8d1a35f0389..b342f212e91fb04bf218cacff06d19b9f1529589 100644 (file)
@@ -50,7 +50,11 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
+               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buffer->raw_buf);
+               else
+                       dev_kfree_skb_any(tx_buffer->skb);
+
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
@@ -1336,6 +1340,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+       context_desc->rsvd = cpu_to_le16(0);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
index 30d248bc5d199d50ba14cc79356d689c2265aebc..8bc6858163b0f1b668e56d3c0d682574cc4582d5 100644 (file)
@@ -75,7 +75,6 @@ enum i40e_dyn_idx_t {
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
@@ -131,6 +130,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
+#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -139,7 +139,10 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        unsigned long time_stamp;
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *raw_buf;
+       };
        unsigned int bytecount;
        unsigned short gso_segs;
        DEFINE_DMA_UNMAP_ADDR(dma);
index d3cf5a69de54d6e7e7bc648dd478873b88510b5c..6dd72ad58e7d846de3d51e21e17d3701d46404d1 100644 (file)
@@ -50,6 +50,9 @@
                                         (d) == I40E_DEV_ID_QSFP_B  || \
                                         (d) == I40E_DEV_ID_QSFP_C)
 
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
 #define I40E_MAX_CHAINED_RX_BUFFERS    5
@@ -137,6 +140,14 @@ enum i40e_fc_mode {
        I40E_FC_DEFAULT
 };
 
+enum i40e_set_fc_aq_failures {
+       I40E_SET_FC_AQ_FAIL_NONE = 0,
+       I40E_SET_FC_AQ_FAIL_GET = 1,
+       I40E_SET_FC_AQ_FAIL_SET = 2,
+       I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+       I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
 enum i40e_vsi_type {
        I40E_VSI_MAIN = 0,
        I40E_VSI_VMDQ1,
@@ -163,6 +174,7 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
+       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
@@ -875,7 +887,6 @@ enum i40e_filter_pctype {
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
        /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
-       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -1162,4 +1173,7 @@ enum i40e_reset_type {
        I40E_RESET_GLOBR        = 2,
        I40E_RESET_EMPR         = 3,
 };
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 #endif /* _I40E_TYPE_H_ */
index 60407a9df0c1ef5103b4e0f198f556b229a9417a..e70e4cdb0eb2114a12a35a88a0059f0e28564903 100644 (file)
@@ -632,7 +632,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
        u32 hlut_val;
        int i, j;
 
-       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
                indir[j++] = hlut_val & 0xff;
                indir[j++] = (hlut_val >> 8) & 0xff;
@@ -659,7 +659,7 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
        u32 hlut_val;
        int i, j;
 
-       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                hlut_val = indir[j++];
                hlut_val |= indir[j++] << 8;
                hlut_val |= indir[j++] << 16;
index 7fc5f3b5d6bf610b936ae229dd31cf93435c2ee4..ed1eb123052256c621f59290d349d22bd39d3cd3 100644 (file)
@@ -34,9 +34,9 @@ static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
-       "Intel(R) XL710 X710 Virtual Function Network Driver";
+       "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.34"
+#define DRV_VERSION "0.9.40"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -260,6 +260,12 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
        int i;
        uint32_t dyn_ctl;
 
+       if (mask & 1) {
+               dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
+               dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                          I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+               wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
+       }
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & (1 << i)) {
                        dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
@@ -278,6 +284,7 @@ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
 {
        struct i40e_hw *hw = &adapter->hw;
 
+       i40evf_misc_irq_enable(adapter);
        i40evf_irq_enable_queues(adapter, ~0);
 
        if (flush)
@@ -2006,7 +2013,6 @@ static void i40evf_init_task(struct work_struct *work)
                }
                adapter->state = __I40EVF_INIT_VERSION_CHECK;
                goto restart;
-               break;
        case __I40EVF_INIT_VERSION_CHECK:
                if (!i40evf_asq_done(hw)) {
                        dev_err(&pdev->dev, "Admin queue command never completed\n");
@@ -2018,17 +2024,20 @@ static void i40evf_init_task(struct work_struct *work)
                if (err) {
                        dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
                                err);
+                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+                               dev_info(&pdev->dev, "Resending request\n");
+                               err = i40evf_send_api_ver(adapter);
+                       }
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Unable send config request (%d)\n",
+                       dev_err(&pdev->dev, "Unable to send config request (%d)\n",
                                err);
                        goto err;
                }
                adapter->state = __I40EVF_INIT_GET_RESOURCES;
                goto restart;
-               break;
        case __I40EVF_INIT_GET_RESOURCES:
                /* aq msg sent, awaiting reply */
                if (!adapter->vf_res) {
@@ -2408,7 +2417,9 @@ static void i40evf_remove(struct pci_dev *pdev)
                i40evf_reset_interrupt_capability(adapter);
        }
 
-       del_timer_sync(&adapter->watchdog_timer);
+       if (adapter->watchdog_timer.function)
+               del_timer_sync(&adapter->watchdog_timer);
+
        flush_scheduled_work();
 
        if (hw->aq.asq.count)
index 2dc0bac7671789fa32cec15d143c217c9b99fef8..66d12f5b4ca83d0de07c7b8fbc567ccaa6def1dd 100644 (file)
@@ -80,8 +80,9 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
  * @adapter: adapter structure
  *
  * Compare API versions with the PF. Must be called after admin queue is
- * initialized. Returns 0 if API versions match, -EIO if
- * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ * initialized. Returns 0 if API versions match, -EIO if they do not,
+ * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
+ * from the firmware are propagated.
  **/
 int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
 {
@@ -102,13 +103,13 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
                goto out_alloc;
 
        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-       if (err) {
-               err = -EIO;
+       if (err)
                goto out_alloc;
-       }
 
        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
            I40E_VIRTCHNL_OP_VERSION) {
+               dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
+                        le32_to_cpu(event.desc.cookie_high));
                err = -EIO;
                goto out_alloc;
        }
@@ -247,11 +248,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
                vqpi++;
        }
 
+       adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                           (u8 *)vqci, len);
        kfree(vqci);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 }
 
 /**
@@ -274,10 +275,10 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
-                          (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+                          (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -300,10 +301,10 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
-                          (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+                          (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -351,11 +352,11 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
        vimi->vecmap[v_idx].txq_map = 0;
        vimi->vecmap[v_idx].rxq_map = 0;
 
+       adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                           (u8 *)vimi, len);
        kfree(vimi);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
 /**
@@ -412,12 +413,11 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
                        f->add = false;
                }
        }
+       adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
 }
 
 /**
@@ -474,11 +474,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
                        kfree(f);
                }
        }
+       adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 }
 
 /**
@@ -535,10 +535,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
                        f->add = false;
                }
        }
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
-       kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+       kfree(vvfl);
 }
 
 /**
@@ -596,10 +596,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
                        kfree(f);
                }
        }
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
-       kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+       kfree(vvfl);
 }
 
 /**
index ee74f9536b31b9d71471a351d7b8ad96c375da41..236a6183a86523a3aef0430b3a575081b32ba7b5 100644 (file)
@@ -579,7 +579,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_MAC_INIT;
-               break;
        }
 
        /* Set media type */
@@ -837,7 +836,6 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
                default:
                        ret_val = -E1000_ERR_PHY;
                        goto out;
-                       break;
                }
                ret_val = igb_get_phy_id(hw);
                goto out;
index a9537ba7a5a072630acc8842b875d98428922bb5..4d2dc17fd31b0076e71015d476a78dc46bf0e72a 100644 (file)
@@ -1630,6 +1630,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
+
+       igb_setup_link(&adapter->hw);
 }
 
 /**
index 15609331ec170ddc6bb9e62c4227107fc6b50cad..206171f732fbc9375e68b134b1e765176b4db927 100644 (file)
@@ -430,7 +430,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
-               break;
        }
 
        /* Set 802.3x based flow control settings. */
index bc7c924240a52490d50531060783e0f13bc97f20..0373a5b9219fdf5011c3051db2c0ed05bbbf15e4 100644 (file)
@@ -432,7 +432,6 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
        default:
                status = IXGBE_ERR_LINK_SETUP;
                goto out;
-               break;
        }
 
        if (hw->phy.multispeed_fiber) {
@@ -2035,7 +2034,6 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
                else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
                goto out;
-               break;
        case IXGBE_AUTOC_LMS_10G_SERIAL:
                if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
                        physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
@@ -2052,10 +2050,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
                if (autoc & IXGBE_AUTOC_KR_SUPP)
                        physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
                goto out;
-               break;
        default:
                goto out;
-               break;
        }
 
 sfp_check:
index 4e5385a2a4658c8c5f0dc9ea4ee279d08693351d..3f318c52e0531ab2e5b18a8b674b7e36bba6982a 100644 (file)
@@ -216,7 +216,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
-               break;
        }
 
        if (hw->mac.type != ixgbe_mac_X540) {
@@ -2179,7 +2178,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
                hw_dbg(hw, "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
-               break;
        }
 
        /* Set 802.3x based flow control settings. */
index 5172b6b12c097679b9f9b532869b500ea6cb4124..75bcb2e084910f2ccf27a9bfd2608a4523128f8b 100644 (file)
@@ -460,7 +460,6 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
                        break;
                default:
                        return -EINVAL;
-                       break;
                }
        } else {
                return -EINVAL;
@@ -495,10 +494,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  * @id: id is either ether type or TCP/UDP port number
  *
  * Returns : on success, returns a non-zero 802.1p user priority bitmap
- * otherwise returns 0 as the invalid user priority bitmap to indicate an
+ * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an
  * error.
  */
-static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct dcb_app app = {
@@ -507,7 +506,7 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
                             };
 
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return 0;
+               return -EINVAL;
 
        return dcb_getapp(netdev, &app);
 }
index a452730a327864111d72d13679fc47c2be870b7c..94a1c07efeb0b8b6b1915084bdb7d52c0a5aaacb 100644 (file)
@@ -1408,7 +1408,6 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
        default:
                *data = 1;
                return 1;
-               break;
        }
 
        /*
@@ -2866,7 +2865,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                break;
        default:
                return ethtool_op_get_ts_info(dev, info);
-               break;
        }
        return 0;
 }
index 68f87ecb8a762da7e950a61ae2c9058bb61f5e8e..5fd4b5271f9a19b5df9b06ebb8a1b81fbdbe8eb1 100644 (file)
 #define IXGBE_OVERFLOW_PERIOD    (HZ * 30)
 #define IXGBE_PTP_TX_TIMEOUT     (HZ * 15)
 
-#ifndef NSECS_PER_SEC
-#define NSECS_PER_SEC 1000000000ULL
-#endif
+/* half of a one second clock period, for use with PPS signal. We have to use
+ * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in
+ * order to force at least 64bits of precision for shifting
+ */
+#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL
 
 /**
  * ixgbe_ptp_setup_sdp
@@ -146,8 +148,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
                          IXGBE_TSAUXC_SDP0_INT);
 
                /* clock period (or pulse length) */
-               clktiml = (u32)(NSECS_PER_SEC << shift);
-               clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+               clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift);
+               clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32);
 
                /*
                 * Account for the cyclecounter wrap-around value by
@@ -158,8 +160,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
                clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
                ns = timecounter_cyc2time(&adapter->tc, clock_edge);
 
-               div_u64_rem(ns, NSECS_PER_SEC, &rem);
-               clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);
+               div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem);
+               clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift);
 
                /* specify the initial clock start time */
                trgttiml = (u32)clock_edge;
index 68e6a6613e9a1ccfd6a45f10d0ca0fb53c4c3dd6..1b4fc7c639e6842ec8498a28d8e41ccb2402660e 100644 (file)
@@ -54,6 +54,14 @@ config MVNETA
          driver, which should be used for the older Marvell SoCs
          (Dove, Orion, Discovery, Kirkwood).
 
+config MVPP2
+       tristate "Marvell Armada 375 network interface support"
+       depends on MACH_ARMADA_375
+       select MVMDIO
+       ---help---
+         This driver supports the network interface units in the
+         Marvell ARMADA 375 SoC.
+
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
        depends on CPU_PXA168
index 5c4a7765ff0efbf7094fade03febe101b9abdcc2..f6425bd2884b82dea97aed88e41419fd20cacbae 100644 (file)
@@ -5,6 +5,7 @@
 obj-$(CONFIG_MVMDIO) += mvmdio.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_MVNETA) += mvneta.o
+obj-$(CONFIG_MVPP2) += mvpp2.o
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
new file mode 100644 (file)
index 0000000..4f6e4a1
--- /dev/null
@@ -0,0 +1,6393 @@
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)      (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)      (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG              0x60
+#define MVPP2_RX_FIFO_INIT_REG                 0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port)                        (0x140 + 4 * (port))
+#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)   (((s) & 0xfff) << 16)
+#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK  BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool)          (0x180 + 4 * (pool))
+#define     MVPP2_POOL_BUF_SIZE_OFFSET         5
+#define MVPP2_RXQ_CONFIG_REG(rxq)              (0x800 + 4 * (rxq))
+#define     MVPP2_SNOOP_PKT_SIZE_MASK          0x1ff
+#define     MVPP2_SNOOP_BUF_HDR_MASK           BIT(9)
+#define     MVPP2_RXQ_POOL_SHORT_OFFS          20
+#define     MVPP2_RXQ_POOL_SHORT_MASK          0x700000
+#define     MVPP2_RXQ_POOL_LONG_OFFS           24
+#define     MVPP2_RXQ_POOL_LONG_MASK           0x7000000
+#define     MVPP2_RXQ_PACKET_OFFSET_OFFS       28
+#define     MVPP2_RXQ_PACKET_OFFSET_MASK       0x70000000
+#define     MVPP2_RXQ_DISABLE_MASK             BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG              0x1000
+#define     MVPP2_PRS_PORT_LU_MAX              0xf
+#define     MVPP2_PRS_PORT_LU_MASK(port)       (0xff << ((port) * 4))
+#define     MVPP2_PRS_PORT_LU_VAL(port, val)   ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port)          (0x1004 + ((port) & 4))
+#define     MVPP2_PRS_INIT_OFF_MASK(port)      (0x3f << (((port) % 4) * 8))
+#define     MVPP2_PRS_INIT_OFF_VAL(port, val)  ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port)           (0x100c + ((port) & 4))
+#define     MVPP2_PRS_MAX_LOOP_MASK(port)      (0xff << (((port) % 4) * 8))
+#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)  ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG                 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx)           (0x1104 + (idx) * 4)
+#define     MVPP2_PRS_TCAM_INV_MASK            BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG                 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx)           (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG                        0x1230
+#define     MVPP2_PRS_TCAM_EN_MASK             BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG                     0x1800
+#define     MVPP2_CLS_MODE_ACTIVE_MASK         BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG                 0x1810
+#define     MVPP2_CLS_PORT_WAY_MASK(port)      (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG                        0x1814
+#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS       6
+#define MVPP2_CLS_LKP_TBL_REG                  0x1818
+#define     MVPP2_CLS_LKP_TBL_RXQ_MASK         0xff
+#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK   BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG               0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG                        0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG                        0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG                        0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)   (0x1980 + ((port) * 4))
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS    3
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK    0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port)         (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG              0x19d0
+#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)   (1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG                      0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG                        0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG                        0x2048
+#define     MVPP2_RXQ_DESC_SIZE_MASK           0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)       (0x3000 + 4 * (rxq))
+#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET     0
+#define     MVPP2_RXQ_NUM_NEW_OFFSET           16
+#define MVPP2_RXQ_STATUS_REG(rxq)              (0x3400 + 4 * (rxq))
+#define     MVPP2_RXQ_OCCUPIED_MASK            0x3fff
+#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET      16
+#define     MVPP2_RXQ_NON_OCCUPIED_MASK                0x3fff0000
+#define MVPP2_RXQ_THRESH_REG                   0x204c
+#define     MVPP2_OCCUPIED_THRESH_OFFSET       0
+#define     MVPP2_OCCUPIED_THRESH_MASK         0x3fff
+#define MVPP2_RXQ_INDEX_REG                    0x2050
+#define MVPP2_TXQ_NUM_REG                      0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG                        0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG                        0x2088
+#define     MVPP2_TXQ_DESC_SIZE_MASK           0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG              0x2090
+#define MVPP2_TXQ_THRESH_REG                   0x2094
+#define     MVPP2_TRANSMITTED_THRESH_OFFSET    16
+#define     MVPP2_TRANSMITTED_THRESH_MASK      0x3fff0000
+#define MVPP2_TXQ_INDEX_REG                    0x2098
+#define MVPP2_TXQ_PREF_BUF_REG                 0x209c
+#define     MVPP2_PREF_BUF_PTR(desc)           ((desc) & 0xfff)
+#define     MVPP2_PREF_BUF_SIZE_4              (BIT(12) | BIT(13))
+#define     MVPP2_PREF_BUF_SIZE_16             (BIT(12) | BIT(14))
+#define     MVPP2_PREF_BUF_THRESH(val)         ((val) << 17)
+#define     MVPP2_TXQ_DRAIN_EN_MASK            BIT(31)
+#define MVPP2_TXQ_PENDING_REG                  0x20a0
+#define     MVPP2_TXQ_PENDING_MASK             0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG               0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)                        (0x3c00 + 4 * (txq))
+#define     MVPP2_TRANSMITTED_COUNT_OFFSET     16
+#define     MVPP2_TRANSMITTED_COUNT_MASK       0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG                 0x20b0
+#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET                16
+#define MVPP2_TXQ_RSVD_RSLT_REG                        0x20b4
+#define     MVPP2_TXQ_RSVD_RSLT_MASK           0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG                 0x20b8
+#define     MVPP2_TXQ_RSVD_CLR_OFFSET          16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)      (0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)      (0x2140 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK      0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)         (0x2180 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_PENDING_MASK                0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)          (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w)                      (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w)                      (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w)                     (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE                 0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)                (0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq)           (0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port)             (0x5420 + 4 * (port))
+#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)   ((mask) & 0xffff)
+#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)  (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port)                (0x5480 + 4 * (port))
+#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK        0xffff
+#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK        0xff0000
+#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK   BIT(24)
+#define     MVPP2_CAUSE_FCS_ERR_MASK           BIT(25)
+#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK  BIT(26)
+#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK  BIT(29)
+#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK  BIT(30)
+#define     MVPP2_CAUSE_MISC_SUM_MASK          BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port)         (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG           0x54bc
+#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK    0xffff
+#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK    0x3fc00000
+#define     MVPP2_PON_CAUSE_MISC_SUM_MASK              BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG               0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool)           (0x6000 + ((pool) * 4))
+#define     MVPP2_BM_POOL_BASE_ADDR_MASK       0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool)           (0x6040 + ((pool) * 4))
+#define     MVPP2_BM_POOL_SIZE_MASK            0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool)       (0x6080 + ((pool) * 4))
+#define     MVPP2_BM_POOL_GET_READ_PTR_MASK    0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)       (0x60c0 + ((pool) * 4))
+#define     MVPP2_BM_POOL_PTRS_NUM_MASK                0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool)       (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)       (0x6140 + ((pool) * 4))
+#define     MVPP2_BM_BPPI_PTR_NUM_MASK         0x7ff
+#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK   BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool)           (0x6200 + ((pool) * 4))
+#define     MVPP2_BM_START_MASK                        BIT(0)
+#define     MVPP2_BM_STOP_MASK                 BIT(1)
+#define     MVPP2_BM_STATE_MASK                        BIT(4)
+#define     MVPP2_BM_LOW_THRESH_OFFS           8
+#define     MVPP2_BM_LOW_THRESH_MASK           0x7f00
+#define     MVPP2_BM_LOW_THRESH_VALUE(val)     ((val) << \
+                                               MVPP2_BM_LOW_THRESH_OFFS)
+#define     MVPP2_BM_HIGH_THRESH_OFFS          16
+#define     MVPP2_BM_HIGH_THRESH_MASK          0x7f0000
+#define     MVPP2_BM_HIGH_THRESH_VALUE(val)    ((val) << \
+                                               MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool)          (0x6240 + ((pool) * 4))
+#define     MVPP2_BM_RELEASED_DELAY_MASK       BIT(0)
+#define     MVPP2_BM_ALLOC_FAILED_MASK         BIT(1)
+#define     MVPP2_BM_BPPE_EMPTY_MASK           BIT(2)
+#define     MVPP2_BM_BPPE_FULL_MASK            BIT(3)
+#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK     BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool)           (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool)           (0x6400 + ((pool) * 4))
+#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK      BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG                        0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool)             (0x6480 + ((pool) * 4))
+#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK      BIT(0)
+#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK      BIT(1)
+#define     MVPP2_BM_PHY_RLS_GRNTD_MASK                BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG                  0x64c0
+#define MVPP2_BM_MC_RLS_REG                    0x64c4
+#define     MVPP2_BM_MC_ID_MASK                        0xfff
+#define     MVPP2_BM_FORCE_RELEASE_MASK                BIT(12)
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG         0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG              0x8004
+#define     MVPP2_TXP_SCHED_ENQ_MASK           0xff
+#define     MVPP2_TXP_SCHED_DISQ_OFFSET                8
+#define MVPP2_TXP_SCHED_CMD_1_REG              0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG             0x8018
+#define MVPP2_TXP_SCHED_MTU_REG                        0x801c
+#define     MVPP2_TXP_MTU_MAX                  0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG             0x8020
+#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK   0x7ffff
+#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK   0x3ff00000
+#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)    ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG         0x8024
+#define     MVPP2_TXP_TOKEN_SIZE_MAX           0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q)          (0x8040 + ((q) << 2))
+#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK   0x7ffff
+#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK   0x3ff00000
+#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)    ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)      (0x8060 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_SIZE_MAX           0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)      (0x8080 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_CNTR_MAX           0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG                     0x8800
+#define MVPP2_TX_PORT_FLUSH_REG                        0x8810
+#define     MVPP2_TX_PORT_FLUSH_MASK(port)     (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE                  0x24
+#define MVPP2_SRC_ADDR_HIGH                    0x28
+#define MVPP2_MIB_COUNTERS_BASE(port)          (0x1000 + ((port) >> 1) * \
+                                               0x400 + (port) * 0x400)
+#define     MVPP2_MIB_LATE_COLLISION           0x7c
+#define MVPP2_ISR_SUM_MASK_REG                 0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG     0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT          0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG                  0x0
+#define      MVPP2_GMAC_PORT_EN_MASK           BIT(0)
+#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS       2
+#define      MVPP2_GMAC_MAX_RX_SIZE_MASK       0x7ffc
+#define      MVPP2_GMAC_MIB_CNTR_EN_MASK       BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG                  0x4
+#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK   BIT(0)
+#define      MVPP2_GMAC_GMII_LB_EN_MASK                BIT(5)
+#define      MVPP2_GMAC_PCS_LB_EN_BIT          6
+#define      MVPP2_GMAC_PCS_LB_EN_MASK         BIT(6)
+#define      MVPP2_GMAC_SA_LOW_OFFS            7
+#define MVPP2_GMAC_CTRL_2_REG                  0x8
+#define      MVPP2_GMAC_INBAND_AN_MASK         BIT(0)
+#define      MVPP2_GMAC_PCS_ENABLE_MASK                BIT(3)
+#define      MVPP2_GMAC_PORT_RGMII_MASK                BIT(4)
+#define      MVPP2_GMAC_PORT_RESET_MASK                BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG              0xc
+#define      MVPP2_GMAC_FORCE_LINK_DOWN                BIT(0)
+#define      MVPP2_GMAC_FORCE_LINK_PASS                BIT(1)
+#define      MVPP2_GMAC_CONFIG_MII_SPEED       BIT(5)
+#define      MVPP2_GMAC_CONFIG_GMII_SPEED      BIT(6)
+#define      MVPP2_GMAC_AN_SPEED_EN            BIT(7)
+#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX     BIT(12)
+#define      MVPP2_GMAC_AN_DUPLEX_EN           BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG         0x1c
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS    6
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK        0x1fc0
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+                                       MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK     0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+       (((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_RX_COAL_PKTS             32
+#define MVPP2_RX_COAL_USEC             100
+
+/* The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically by zeroes on
+ * the RX side. Those two bytes being at the front of the Ethernet
+ * header, they allow to have the IP header aligned on a 4 bytes
+ * boundary automatically: the hardware skips those two bytes on its
+ * own.
+ */
+#define MVPP2_MH_SIZE                  2
+#define MVPP2_ETH_TYPE_LEN             2
+#define MVPP2_PPPOE_HDR_SIZE           8
+#define MVPP2_VLAN_TAG_LEN             4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE             0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE    32
+#define MVPP2_TX_CSUM_MAX_SIZE         9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC  1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC  1000
+
+#define MVPP2_TX_MTU_MAX               0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT                        16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS                        4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ                  8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ                  8
+
+/* Dfault number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ              4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM            (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD                  128
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD                  1024
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK           64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE            256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE                32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN            (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE   0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE   0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT     0x80
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+       ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+             ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+
+#define MVPP2_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size)  ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+       ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
+
+#define MVPP2_BIT_TO_BYTE(bit)         ((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE         16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK               BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+       MVPP2_TAG_TYPE_NONE = 0,
+       MVPP2_TAG_TYPE_MH   = 1,
+       MVPP2_TAG_TYPE_DSA  = 2,
+       MVPP2_TAG_TYPE_EDSA = 3,
+       MVPP2_TAG_TYPE_VLAN = 4,
+       MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE       256
+#define MVPP2_PRS_TCAM_WORDS           6
+#define MVPP2_PRS_SRAM_WORDS           4
+#define MVPP2_PRS_FLOW_ID_SIZE         64
+#define MVPP2_PRS_FLOW_ID_MASK         0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID   1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT  BIT(5)
+#define MVPP2_PRS_IPV4_HEAD            0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK       0xf0
+#define MVPP2_PRS_IPV4_MC              0xe0
+#define MVPP2_PRS_IPV4_MC_MASK         0xf0
+#define MVPP2_PRS_IPV4_BC_MASK         0xff
+#define MVPP2_PRS_IPV4_IHL             0x5
+#define MVPP2_PRS_IPV4_IHL_MASK                0xf
+#define MVPP2_PRS_IPV6_MC              0xff
+#define MVPP2_PRS_IPV6_MC_MASK         0xff
+#define MVPP2_PRS_IPV6_HOP_MASK                0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK      0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L    0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX                100
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS                      8
+#define MVPP2_PRS_PORT_MASK                    0xff
+#define MVPP2_PRS_LU_MASK                      0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs)         \
+                                   (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)      \
+                                             (((offs) * 2) - ((offs) % 2)  + 2)
+#define MVPP2_PRS_TCAM_AI_BYTE                 16
+#define MVPP2_PRS_TCAM_PORT_BYTE               17
+#define MVPP2_PRS_TCAM_LU_BYTE                 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs)           ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD                        5
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL              0
+#define MVPP2_PE_FIRST_FREE_TID                1
+#define MVPP2_PE_LAST_FREE_TID         (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN      (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6            (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW     (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW    (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED           (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED         (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED            (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED          (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED     (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED   (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED      (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED    (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT            (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT           (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN          (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN          (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL              (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE             (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL            (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS       (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS   (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS                 0
+#define MVPP2_PRS_SRAM_RI_WORD                 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS            32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD            1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS            32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS              64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT          72
+#define MVPP2_PRS_SRAM_UDF_OFFS                        73
+#define MVPP2_PRS_SRAM_UDF_BITS                        8
+#define MVPP2_PRS_SRAM_UDF_MASK                        0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT            81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS           82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK           0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3             1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4             4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS       85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK       0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD                1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD    2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD    3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS         87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS         2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK         0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD          0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD      2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD      3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS                89
+#define MVPP2_PRS_SRAM_AI_OFFS                 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS            98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS            8
+#define MVPP2_PRS_SRAM_AI_MASK                 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS            106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK            0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT             110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT              111
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK               0x1
+#define MVPP2_PRS_RI_DSA_MASK                  0x2
+#define MVPP2_PRS_RI_VLAN_MASK                 0xc
+#define MVPP2_PRS_RI_VLAN_NONE                 ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE               BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE               BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE               (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK             0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC          BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK              0x600
+#define MVPP2_PRS_RI_L2_UCAST                  ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST                  BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST                  BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK                        0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK             0x7000
+#define MVPP2_PRS_RI_L3_UN                     ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4                    BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT                        BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER              (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6                    BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT                        (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP                    (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK              0x18000
+#define MVPP2_PRS_RI_L3_UCAST                  ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST                  BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST                  (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK              0x20000
+#define MVPP2_PRS_RI_UDF3_MASK                 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL           BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK             0x1c00000
+#define MVPP2_PRS_RI_L4_TCP                    BIT(22)
+#define MVPP2_PRS_RI_L4_UDP                    BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER                  (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK                 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE             BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK                 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT              BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT           BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT              BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT           BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT       BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT                BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI               0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT              BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED               true
+#define MVPP2_PRS_UNTAGGED             false
+#define MVPP2_PRS_EDSA                 true
+#define MVPP2_PRS_DSA                  false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+       MVPP2_PRS_UDF_MAC_DEF,
+       MVPP2_PRS_UDF_MAC_RANGE,
+       MVPP2_PRS_UDF_L2_DEF,
+       MVPP2_PRS_UDF_L2_DEF_COPY,
+       MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID
+ * One value per parser lookup stage; entries tag themselves with the stage
+ * they belong to (see mvpp2_prs_tcam_lu_set / mvpp2_prs_sram_next_lu_set).
+ */
+enum mvpp2_prs_lookup {
+       MVPP2_PRS_LU_MH,
+       MVPP2_PRS_LU_MAC,
+       MVPP2_PRS_LU_DSA,
+       MVPP2_PRS_LU_VLAN,
+       MVPP2_PRS_LU_L2,
+       MVPP2_PRS_LU_PPPOE,
+       MVPP2_PRS_LU_IP4,
+       MVPP2_PRS_LU_IP6,
+       MVPP2_PRS_LU_FLOWS,
+       MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum - destination address class at the IP layer */
+enum mvpp2_prs_l3_cast {
+       MVPP2_PRS_L3_UNI_CAST,
+       MVPP2_PRS_L3_MULTI_CAST,
+       MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE       512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE         64
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM             8
+#define MVPP2_BM_LONG_BUF_NUM          1024
+#define MVPP2_BM_SHORT_BUF_NUM         2048
+#define MVPP2_BM_POOL_SIZE_MAX         (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN                128
+/* Ports 0-2 get a dedicated long pool; all higher ports share pool 2 */
+#define MVPP2_BM_SWF_LONG_POOL(port)   ((port > 2) ? 2 : port)
+#define MVPP2_BM_SWF_SHORT_POOL                3
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS      8
+#define MVPP2_BM_COOKIE_CPU_OFFS       24
+
+/* BM short pool packet size
+ * These value assure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE                MVPP2_RX_MAX_PKT_SIZE(512)
+
+/* Buffer manager pool usage: unused, or software-forwarding long/short */
+enum mvpp2_bm_type {
+       MVPP2_BM_FREE,
+       MVPP2_BM_SWF_LONG,
+       MVPP2_BM_SWF_SHORT
+};
+
+/* Definitions */
+
+/* Shared Packet Processor resources
+ * One instance per controller; shared by all ports (struct mvpp2_port).
+ */
+struct mvpp2 {
+       /* Shared registers' base addresses */
+       void __iomem *base;
+       void __iomem *lms_base;
+
+       /* Common clocks */
+       struct clk *pp_clk;
+       struct clk *gop_clk;
+
+       /* List of pointers to port structures */
+       struct mvpp2_port **port_list;
+
+       /* Aggregated TXQs */
+       struct mvpp2_tx_queue *aggr_txqs;
+
+       /* BM pools */
+       struct mvpp2_bm_pool *bm_pools;
+
+       /* PRS shadow table */
+       struct mvpp2_prs_shadow *prs_shadow;
+       /* PRS auxiliary table for double vlan entries control */
+       bool *prs_double_vlans;
+
+       /* Tclk value */
+       u32 tclk;
+};
+
+/* Per-CPU RX/TX counters, guarded by a u64_stats_sync for 32-bit readers */
+struct mvpp2_pcpu_stats {
+       struct  u64_stats_sync syncp;
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+};
+
+/* Per-port state; points back to the shared struct mvpp2 via ->priv */
+struct mvpp2_port {
+       u8 id;
+
+       int irq;
+
+       struct mvpp2 *priv;
+
+       /* Per-port registers' base address */
+       void __iomem *base;
+
+       struct mvpp2_rx_queue **rxqs;
+       struct mvpp2_tx_queue **txqs;
+       struct net_device *dev;
+
+       int pkt_size;
+
+       /* RX cause bits accumulated while NAPI polling is deferred */
+       u32 pending_cause_rx;
+       struct napi_struct napi;
+
+       /* Flags */
+       unsigned long flags;
+
+       u16 tx_ring_size;
+       u16 rx_ring_size;
+       struct mvpp2_pcpu_stats __percpu *stats;
+
+       /* PHY state as last seen by the link handling code */
+       struct phy_device *phy_dev;
+       phy_interface_t phy_interface;
+       struct device_node *phy_node;
+       unsigned int link;
+       unsigned int duplex;
+       unsigned int speed;
+
+       struct mvpp2_bm_pool *pool_long;
+       struct mvpp2_bm_pool *pool_short;
+
+       /* Index of first port's physical RXQ */
+       u8 first_rxq;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+
+/* TX descriptor command field bits */
+#define MVPP2_TXD_L3_OFF_SHIFT         0
+#define MVPP2_TXD_IP_HLEN_SHIFT                8
+#define MVPP2_TXD_L4_CSUM_FRAG         BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT          BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE      BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE      BIT(23)
+#define MVPP2_TXD_L4_UDP               BIT(24)
+#define MVPP2_TXD_L3_IP6               BIT(26)
+#define MVPP2_TXD_L_DESC               BIT(28)
+#define MVPP2_TXD_F_DESC               BIT(29)
+
+/* RX descriptor status field bits */
+#define MVPP2_RXD_ERR_SUMMARY          BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK                (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC              0x0
+#define MVPP2_RXD_ERR_OVERRUN          BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE         (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS      16
+#define MVPP2_RXD_BM_POOL_ID_MASK      (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC             BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK           BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR       BIT(24)
+#define MVPP2_RXD_L4_TCP               BIT(25)
+#define MVPP2_RXD_L4_UDP               BIT(26)
+#define MVPP2_RXD_L3_IP4               BIT(28)
+#define MVPP2_RXD_L3_IP6               BIT(30)
+#define MVPP2_RXD_BUF_HDR              BIT(31)
+
+/* Hardware TX DMA descriptor - layout fixed by the hardware design */
+struct mvpp2_tx_desc {
+       u32 command;            /* Options used by HW for packet transmitting.*/
+       u8  packet_offset;      /* the offset from the buffer beginning */
+       u8  phys_txq;           /* destination queue ID                 */
+       u16 data_size;          /* data size of transmitted packet in bytes */
+       u32 buf_phys_addr;      /* physical addr of transmitted buffer  */
+       u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
+       u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
+       u32 reserved2;          /* reserved (for future use)            */
+};
+
+/* Hardware RX DMA descriptor - layout fixed by the hardware design */
+struct mvpp2_rx_desc {
+       u32 status;             /* info about received packet           */
+       u16 reserved1;          /* parser_info (for future use, PnC)    */
+       u16 data_size;          /* size of received packet in bytes     */
+       u32 buf_phys_addr;      /* physical address of the buffer       */
+       u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
+       u16 reserved2;          /* gem_port_id (for future use, PON)    */
+       u16 reserved3;          /* csum_l4 (for future use, PnC)        */
+       u8  reserved4;          /* bm_qset (for future use, BM)         */
+       u8  reserved5;
+       u16 reserved6;          /* classify_info (for future use, PnC)  */
+       u32 reserved7;          /* flow_id (for future use, PnC) */
+       u32 reserved8;
+};
+
+/* Per-CPU Tx queue control
+ * txq_put_index/txq_get_index form a ring over tx_skb[]; both wrap at size
+ * (see mvpp2_txq_inc_put/mvpp2_txq_inc_get).
+ */
+struct mvpp2_txq_pcpu {
+       int cpu;
+
+       /* Number of Tx DMA descriptors in the descriptor ring */
+       int size;
+
+       /* Number of currently used Tx DMA descriptor in the
+        * descriptor ring
+        */
+       int count;
+
+       /* Number of Tx DMA descriptors reserved for each CPU */
+       int reserved_num;
+
+       /* Array of transmitted skb */
+       struct sk_buff **tx_skb;
+
+       /* Index of last TX DMA descriptor that was inserted */
+       int txq_put_index;
+
+       /* Index of the TX DMA descriptor to be cleaned up */
+       int txq_get_index;
+};
+
+/* Physical Tx queue; per-CPU bookkeeping lives in ->pcpu */
+struct mvpp2_tx_queue {
+       /* Physical number of this Tx queue */
+       u8 id;
+
+       /* Logical number of this Tx queue */
+       u8 log_id;
+
+       /* Number of Tx DMA descriptors in the descriptor ring */
+       int size;
+
+       /* Number of currently used Tx DMA descriptor in the descriptor ring */
+       int count;
+
+       /* Per-CPU control of physical Tx queues */
+       struct mvpp2_txq_pcpu __percpu *pcpu;
+
+       /* Array of transmitted skb */
+       struct sk_buff **tx_skb;
+
+       u32 done_pkts_coal;
+
+       /* Virtual address of the Tx DMA descriptors array */
+       struct mvpp2_tx_desc *descs;
+
+       /* DMA address of the Tx DMA descriptors array */
+       dma_addr_t descs_phys;
+
+       /* Index of the last Tx DMA descriptor */
+       int last_desc;
+
+       /* Index of the next Tx DMA descriptor to process */
+       int next_desc_to_proc;
+};
+
+/* Physical Rx queue, mapped to one port's logical RXQ */
+struct mvpp2_rx_queue {
+       /* RX queue number, in the range 0-31 for physical RXQs */
+       u8 id;
+
+       /* Num of rx descriptors in the rx descriptor ring */
+       int size;
+
+       /* Interrupt coalescing thresholds (packets / time) */
+       u32 pkts_coal;
+       u32 time_coal;
+
+       /* Virtual address of the RX DMA descriptors array */
+       struct mvpp2_rx_desc *descs;
+
+       /* DMA address of the RX DMA descriptors array */
+       dma_addr_t descs_phys;
+
+       /* Index of the last RX DMA descriptor */
+       int last_desc;
+
+       /* Index of the next RX DMA descriptor to process */
+       int next_desc_to_proc;
+
+       /* ID of port to which physical RXQ is mapped */
+       int port;
+
+       /* Port's logic RXQ number to which physical RXQ is mapped */
+       int logic_rxq;
+};
+
+/* Parser TCAM entry, addressable either as words or as raw bytes */
+union mvpp2_prs_tcam_entry {
+       u32 word[MVPP2_PRS_TCAM_WORDS];
+       u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+/* Parser SRAM entry, addressable either as words or as raw bytes */
+union mvpp2_prs_sram_entry {
+       u32 word[MVPP2_PRS_SRAM_WORDS];
+       u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+/* Software image of one parser entry: its index plus tcam/sram contents */
+struct mvpp2_prs_entry {
+       u32 index;
+       union mvpp2_prs_tcam_entry tcam;
+       union mvpp2_prs_sram_entry sram;
+};
+
+/* Software shadow of one parser TCAM/SRAM slot; lets the driver find and
+ * reuse entries without reading the hardware tables back.
+ */
+struct mvpp2_prs_shadow {
+       bool valid;
+       bool finish;
+
+       /* Lookup ID */
+       int lu;
+
+       /* User defined offset */
+       int udf;
+
+       /* Result info */
+       u32 ri;
+       u32 ri_mask;
+};
+
+/* Classifier flow table entry */
+struct mvpp2_cls_flow_entry {
+       u32 index;
+       u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+/* Classifier lookup table entry */
+struct mvpp2_cls_lookup_entry {
+       u32 lkpid;
+       u32 way;
+       u32 data;
+};
+
+/* Buffer manager pool state */
+struct mvpp2_bm_pool {
+       /* Pool number in the range 0-7 */
+       int id;
+       enum mvpp2_bm_type type;
+
+       /* Buffer Pointers Pool External (BPPE) size */
+       int size;
+       /* Number of buffers for this pool */
+       int buf_num;
+       /* Pool buffer size */
+       int buf_size;
+       /* Packet size */
+       int pkt_size;
+
+       /* BPPE virtual base address */
+       u32 *virt_addr;
+       /* BPPE physical base address */
+       dma_addr_t phys_addr;
+
+       /* Ports using BM pool */
+       u32 port_map;
+
+       /* Occupied buffers indicator */
+       atomic_t in_use;
+       int in_use_thresh;
+
+       spinlock_t lock;
+};
+
+/* Buffer header prepended by hardware when a packet spans several buffers;
+ * links to the next buffer in the chain.
+ */
+struct mvpp2_buff_hdr {
+       u32 next_buff_phys_addr;
+       u32 next_buff_virt_addr;
+       u16 byte_count;
+       u16 info;
+       u8  reserved1;          /* bm_qset (for future use, BM)         */
+};
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK    0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info)   ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS     12
+#define MVPP2_B_HDR_INFO_LAST_MASK     BIT(12)
+/* Parenthesize the macro argument so expressions expand safely, matching
+ * MVPP2_B_HDR_INFO_MC_ID above.
+ */
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+          (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Static declarations */
+
+/* Number of RXQs used by single port */
+static int rxq_number = MVPP2_DEFAULT_RXQ;
+/* Number of TXQs used by single port */
+static int txq_number = MVPP2_MAX_TXQ;
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+/* Write a 32-bit value to a shared (non per-port) register */
+static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+       writel(data, priv->base + offset);
+}
+
+/* Read a 32-bit value from a shared (non per-port) register */
+static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+       return readl(priv->base + offset);
+}
+
+/* Advance the per-CPU TX cleanup index, wrapping at ring size */
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+       txq_pcpu->txq_get_index++;
+       if (txq_pcpu->txq_get_index == txq_pcpu->size)
+               txq_pcpu->txq_get_index = 0;
+}
+
+/* Record @skb at the per-CPU TX insert index and advance it, wrapping at
+ * ring size; the skb is retrieved later at cleanup time via txq_get_index.
+ */
+static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+                             struct sk_buff *skb)
+{
+       txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+       txq_pcpu->txq_put_index++;
+       if (txq_pcpu->txq_put_index == txq_pcpu->size)
+               txq_pcpu->txq_put_index = 0;
+}
+
+/* Get number of physical egress port
+ * Ethernet port numbers start after the TCONT (PON) range.
+ */
+static inline int mvpp2_egress_port(struct mvpp2_port *port)
+{
+       return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get number of physical TXQ
+ * Each physical egress port owns MVPP2_MAX_TXQ consecutive queues.
+ */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+       return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Parser configuration routines */
+
+/* Update parser tcam and sram hw entries
+ * Writes @pe to the hardware tables via the indirect index/data registers.
+ * Returns 0 on success, -EINVAL if pe->index is out of range.
+ */
+static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+       int i;
+
+       if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+               return -EINVAL;
+
+       /* Clear entry invalidation bit */
+       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+       /* Write tcam index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+       /* Write sram index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+               mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+       return 0;
+}
+
+/* Read tcam entry from hw
+ * Fills @pe (tcam and sram words) for the slot selected by pe->index.
+ * Returns 0 on success, -EINVAL for a bad index, or
+ * MVPP2_PRS_TCAM_ENTRY_INVALID if the hw entry is invalidated (in which
+ * case the remaining words are left unread).
+ */
+static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+       int i;
+
+       if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+               return -EINVAL;
+
+       /* Write tcam index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+       /* Check the invalidation bit first to avoid reading a stale entry */
+       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+                             MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+       if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+               return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+               pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+       /* Write sram index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+               pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+       return 0;
+}
+
+/* Invalidate tcam hw entry
+ * Sets the invalidation bit of slot @index so the parser skips it.
+ */
+static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+{
+       /* Write index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+       mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+                   MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
+{
+       priv->prs_shadow[index].valid = true;
+       priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
+                                   unsigned int ri, unsigned int ri_mask)
+{
+       priv->prs_shadow[index].ri_mask = ri_mask;
+       priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry
+ * Sets the lookup ID byte and fully enables it in the match mask.
+ */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+       pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry
+ * Port mask bits are inverted: a cleared enable bit means "match this port",
+ * so @add clears the bit and !@add sets it.
+ */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+                                   unsigned int port, bool add)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       if (add)
+               pe->tcam.byte[enable_off] &= ~(1 << port);
+       else
+               pe->tcam.byte[enable_off] |= 1 << port;
+}
+
+/* Update port map in tcam sw entry
+ * The enable byte stores the complement of the port map (0 = match).
+ */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+                                       unsigned int ports)
+{
+       unsigned char port_mask = MVPP2_PRS_PORT_MASK;
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+       pe->tcam.byte[enable_off] &= ~port_mask;
+       pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+}
+
+/* Obtain port map from tcam sw entry
+ * Inverts the stored complement back to the plain port map.
+ */
+static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+                                        unsigned int offs, unsigned char byte,
+                                        unsigned char enable)
+{
+       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+                                        unsigned int offs, unsigned char *byte,
+                                        unsigned char *enable)
+{
+       *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+       *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern
+ * Reassembles the 16-bit value stored at @offs (high byte at off + 1, as
+ * written by mvpp2_prs_match_etype) and compares it with @data.
+ */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+                                   u16 data)
+{
+       int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+       u16 tcam_data;
+
+       /* Shift the high byte left by 8; the previous "(8 << byte)" form
+        * shifted the constant 8 by the byte value instead, so the compare
+        * was wrong for almost every pattern.
+        */
+       tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+       if (tcam_data != data)
+               return false;
+       return true;
+}
+
+/* Update ai bits in tcam sw entry
+ * Only the bits selected by @enable are copied from @bits; the matching
+ * enable bits are then turned on so the parser compares them.
+ */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int enable)
+{
+       int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+       for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+
+               if (!(enable & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       pe->tcam.byte[ai_idx] |= 1 << i;
+               else
+                       pe->tcam.byte[ai_idx] &= ~(1 << i);
+       }
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+       return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Set ethertype in tcam sw entry
+ * Stores the 16-bit value big-endian style (high byte first) with both
+ * bytes fully enabled for matching.
+ */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+                                 unsigned short ethertype)
+{
+       mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+       mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry
+ * @val is OR-ed into the byte holding @bit_num; callers pass values that
+ * fit within the remaining bits of that byte.
+ */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+                                   int val)
+{
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+                                     int val)
+{
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Update ri bits in sram sw entry
+ * For each bit selected by @mask: set/clear the RI bit from @bits and set
+ * the corresponding RI control bit so the hardware applies the update.
+ */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int mask)
+{
+       unsigned int i;
+
+       for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+               int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+               if (!(mask & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+               else
+                       mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+       }
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+       return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry
+ * Mirrors mvpp2_prs_sram_ri_update for the AI field and its control bits.
+ */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int mask)
+{
+       unsigned int i;
+       int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+       for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+
+               if (!(mask & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+               else
+                       mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+       }
+}
+
+/* Read ai bits from sram sw entry
+ * The AI field is not byte-aligned, so the value is rebuilt from two
+ * adjacent bytes.
+ */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+       u8 bits;
+       int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+       int ai_en_off = ai_off + 1;
+       int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+       bits = (pe->sram.byte[ai_off] >> ai_shift) |
+              (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+       return bits;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next
+ * lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+                                      unsigned int lu)
+{
+       int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+       /* Clear the whole field before setting the new lookup ID */
+       mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+                                 MVPP2_PRS_SRAM_NEXT_LU_MASK);
+       mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ * @shift may be negative; it is stored as sign bit + magnitude.
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+                                    unsigned int op)
+{
+       /* Set sign */
+       if (shift < 0) {
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+               shift = 0 - shift;
+       } else {
+               mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+       }
+
+       /* Set value */
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+                                                          (unsigned char)shift;
+
+       /* Reset and set operation */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+       /* Set base offset as current */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ * Both the UDF value and the operation field straddle a byte boundary in
+ * SRAM, so each is written as a low part via the bit helpers and a high
+ * part patched directly into the following byte.
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+                                     unsigned int type, int offset,
+                                     unsigned int op)
+{
+       /* Set sign */
+       if (offset < 0) {
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+               offset = 0 - offset;
+       } else {
+               mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+       }
+
+       /* Set value */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+                                 MVPP2_PRS_SRAM_UDF_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+       /* Clear then set the bits of the value that spill into the next byte */
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_UDF_BITS)] &=
+             ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_UDF_BITS)] |=
+                               (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+       /* Set offset type */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+                                 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+       /* Set offset operation */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+       /* Likewise for the bits of the operation that cross the byte edge */
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+                                            ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+                                   (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+                            (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+       /* Set base offset as current */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry
+ * Scans the shadow table (top-down) for a FLOWS-stage entry whose SRAM AI
+ * bits match @flow. Returns a kzalloc'ed entry the caller must kfree, or
+ * NULL if not found or on allocation failure.
+ */
+static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+       for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+               u8 bits;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+                       continue;
+
+               pe->index = tid;
+               mvpp2_prs_hw_read(priv, pe);
+               bits = mvpp2_prs_sram_ai_get(pe);
+
+               /* Sram store classification lookup ID in AI bits [5:0] */
+               if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Return first free tcam index, seeking from start to end
+ * The range is inclusive and normalized (swapped if reversed, clamped to
+ * the table size). Returns -EINVAL when no free slot exists in the range.
+ */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+                                    unsigned char end)
+{
+       int tid;
+
+       if (start > end)
+               swap(start, end);
+
+       if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+               end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+       for (tid = start; tid <= end; tid++) {
+               if (!priv->prs_shadow[tid].valid)
+                       return tid;
+       }
+
+       return -EINVAL;
+}
+
+/* Enable/disable dropping all mac da's
+ * Creates the shared drop-all entry on first use, then only toggles @port
+ * in its port mask (@add true = drop for this port).
+ */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+       struct mvpp2_prs_entry pe;
+
+       if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+               /* Entry exist - update port only */
+               pe.index = MVPP2_PE_DROP_ALL;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = MVPP2_PE_DROP_ALL;
+
+               /* Non-promiscuous mode for all ports - DROP unknown packets */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                        MVPP2_PRS_RI_DROP_MASK);
+
+               mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to promiscuous mode
+ * Creates the shared promiscuous entry on first use, then only toggles
+ * @port in its port mask (@add true = promiscuous on for this port).
+ */
+static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
+{
+       struct mvpp2_prs_entry pe;
+
+       /* Promiscuous mode - Accept unknown packets */
+
+       if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+               /* Entry exist - update port only */
+               pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+               /* Continue - set next lookup */
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+               /* Set result info bits */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+                                        MVPP2_PRS_RI_L2_CAST_MASK);
+
+               /* Shift to ethertype */
+               mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Accept multicast
+ * @index selects the all-IPv4 or all-IPv6 multicast entry; the entry is
+ * created on first use, then @port is toggled in its port mask.
+ */
+static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
+                                   bool add)
+{
+       struct mvpp2_prs_entry pe;
+       unsigned char da_mc;
+
+       /* Ethernet multicast address first byte is
+        * 0x01 for IPv4 and 0x33 for IPv6
+        */
+       da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+       if (priv->prs_shadow[index].valid) {
+               /* Entry exist - update port only */
+               pe.index = index;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = index;
+
+               /* Continue - set next lookup */
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+               /* Set result info bits */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+                                        MVPP2_PRS_RI_L2_CAST_MASK);
+
+               /* Update tcam entry data first byte */
+               mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+
+               /* Shift to ethertype */
+               mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets
+ * Selects the (E)DSA tagged/untagged entry, creates it on first use
+ * (shifting past the 4- or 8-byte tag), then toggles @port in its mask.
+ */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+                                 bool tagged, bool extend)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, shift;
+
+       if (extend) {
+               tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+               shift = 8;
+       } else {
+               tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+               shift = 4;
+       }
+
+       if (priv->prs_shadow[tid].valid) {
+               /* Entry exist - update port only */
+               pe.index = tid;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+               pe.index = tid;
+
+               /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
+               mvpp2_prs_sram_shift_set(&pe, shift,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+               if (tagged) {
+                       /* Set tagged bit in DSA tag */
+                       mvpp2_prs_tcam_data_byte_set(&pe, 0,
+                                                    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+                                                    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+                       /* Clear all ai bits for next iteration */
+                       mvpp2_prs_sram_ai_update(&pe, 0,
+                                                MVPP2_PRS_SRAM_AI_MASK);
+                       /* If packet is tagged continue check vlans */
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+               } else {
+                       /* Set result info bits to 'no vlans' */
+                       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+               }
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+                                           bool add, bool tagged, bool extend)
+{
+       struct mvpp2_prs_entry pe;
+       int entry_id, tag_len, def_ports;
+
+       if (extend) {
+               entry_id = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+                          MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+               def_ports = 0;
+               tag_len = 8;
+       } else {
+               entry_id = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+                          MVPP2_PE_ETYPE_DSA_UNTAGGED;
+               def_ports = MVPP2_PRS_PORT_MASK;
+               tag_len = 4;
+       }
+
+       if (!priv->prs_shadow[entry_id].valid) {
+               /* No such entry yet - build it from scratch */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+               pe.index = entry_id;
+
+               /* Match the EDSA ethertype followed by a zero half-word */
+               mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+               mvpp2_prs_match_etype(&pe, 2, 0);
+
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+                                        MVPP2_PRS_RI_DSA_MASK);
+               /* Skip ethertype + 2 reserved bytes + the tag itself */
+               mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + tag_len,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+               if (tagged) {
+                       /* Match the 'tagged' bit inside the DSA tag */
+                       mvpp2_prs_tcam_data_byte_set(&pe,
+                                                    MVPP2_ETH_TYPE_LEN + 2 + 3,
+                                                MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+                                                MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+                       /* Clear all ai bits for next iteration */
+                       mvpp2_prs_sram_ai_update(&pe, 0,
+                                                MVPP2_PRS_SRAM_AI_MASK);
+                       /* Tagged frames continue with the vlan lookup */
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+               } else {
+                       /* Untagged: flag 'no vlans' and go straight to L2 */
+                       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+               }
+               /* Mask/unmask all ports, depending on dsa type */
+               mvpp2_prs_tcam_port_map_set(&pe, def_ports);
+       } else {
+               /* Entry already exists - fetch it to update the port only */
+               pe.index = entry_id;
+               mvpp2_prs_hw_read(priv, &pe);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry.
+ * Returns a heap-allocated copy of the matching entry (caller frees),
+ * or NULL when nothing matches or allocation fails.
+ */
+static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
+                                                  unsigned short tpid, int ai)
+{
+       struct mvpp2_prs_entry *entry;
+       int tid;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(entry, MVPP2_PRS_LU_VLAN);
+
+       /* Scan every entry belonging to the VLAN lookup stage */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int vlan_type, tcam_ai;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+                       continue;
+
+               entry->index = tid;
+               mvpp2_prs_hw_read(priv, entry);
+
+               /* The tpid is stored byte-swapped in the tcam data */
+               if (!mvpp2_prs_tcam_data_cmp(entry, 0, swab16(tpid)))
+                       continue;
+
+               /* Get vlan type */
+               vlan_type = mvpp2_prs_sram_ri_get(entry) &
+                           MVPP2_PRS_RI_VLAN_MASK;
+
+               /* Compare ai values, ignoring the double vlan bit */
+               tcam_ai = mvpp2_prs_tcam_ai_get(entry) &
+                         ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+               if (ai != tcam_ai)
+                       continue;
+
+               if (vlan_type == MVPP2_PRS_RI_VLAN_SINGLE ||
+                   vlan_type == MVPP2_PRS_RI_VLAN_TRIPLE)
+                       return entry;
+       }
+       kfree(entry);
+
+       return NULL;
+}
+
+/* Add/update single/triple vlan entry
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
+ * new entry would not stay above the last double vlan entry, or a
+ * negative value when no free tcam slot is available.
+ */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+                             unsigned int port_map)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid_aux, tid;
+
+       pe = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+       if (!pe) {
+               /* Create new tcam entry */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+                                               MVPP2_PE_FIRST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               /* Get last double vlan tid */
+               for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+                    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+                       unsigned int ri_bits;
+
+                       if (!priv->prs_shadow[tid_aux].valid ||
+                           priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+                               continue;
+
+                       pe->index = tid_aux;
+                       mvpp2_prs_hw_read(priv, pe);
+                       ri_bits = mvpp2_prs_sram_ri_get(pe);
+                       if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+                           MVPP2_PRS_RI_VLAN_DOUBLE)
+                               break;
+               }
+
+               /* Single/triple vlan entries must stay above double ones */
+               if (tid <= tid_aux) {
+                       /* Don't leak the entry allocated above */
+                       kfree(pe);
+                       return -EINVAL;
+               }
+
+               memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               pe->index = tid;
+
+               mvpp2_prs_match_etype(pe, 0, tpid);
+
+               mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+               /* Shift 4 bytes - skip 1 vlan tag */
+               mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+               /* Clear all ai bits for next iteration */
+               mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+               if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+                       mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+               } else {
+                       ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+                       mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+               }
+               mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+               mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+       }
+       /* Update ports' mask */
+       mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+       mvpp2_prs_hw_write(priv, pe);
+
+       kfree(pe);
+
+       return 0;
+}
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+       int ai;
+
+       /* Slot 0 is never handed out - start scanning from 1 */
+       for (ai = 1; ai < MVPP2_PRS_DBL_VLANS_MAX; ai++)
+               if (!priv->prs_double_vlans[ai])
+                       return ai;
+
+       return -EINVAL;
+}
+
+/* Search for existing double vlan entry.
+ * Returns a heap-allocated copy of the matching entry (caller frees),
+ * or NULL when nothing matches or allocation fails.
+ */
+static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
+                                                         unsigned short tpid1,
+                                                         unsigned short tpid2)
+{
+       struct mvpp2_prs_entry *entry;
+       int tid;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(entry, MVPP2_PRS_LU_VLAN);
+
+       /* Scan every entry belonging to the VLAN lookup stage */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int vlan_type;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+                       continue;
+
+               entry->index = tid;
+               mvpp2_prs_hw_read(priv, entry);
+
+               /* Both tpids are stored byte-swapped in the tcam data */
+               if (!mvpp2_prs_tcam_data_cmp(entry, 0, swab16(tpid1)) ||
+                   !mvpp2_prs_tcam_data_cmp(entry, 4, swab16(tpid2)))
+                       continue;
+
+               vlan_type = mvpp2_prs_sram_ri_get(entry) &
+                           MVPP2_PRS_RI_VLAN_MASK;
+               if (vlan_type == MVPP2_PRS_RI_VLAN_DOUBLE)
+                       return entry;
+       }
+       kfree(entry);
+
+       return NULL;
+}
+
+/* Add or update double vlan entry
+ *
+ * Returns 0 on success and a negative error code on failure (no free
+ * tcam slot, allocation failure, no free ai value, or a placement
+ * conflict with existing single/triple vlan entries).
+ */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+                                    unsigned short tpid2,
+                                    unsigned int port_map)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid_aux, tid, ai, ret = 0;
+
+       pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+       if (!pe) {
+               /* Create new tcam entry */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                               MVPP2_PE_LAST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               /* Set ai value for new double vlan entry */
+               ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+               if (ai < 0) {
+                       /* Don't leak the entry allocated above */
+                       ret = ai;
+                       goto error;
+               }
+
+               /* Get first single/triple vlan tid */
+               for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+                    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+                       unsigned int ri_bits;
+
+                       if (!priv->prs_shadow[tid_aux].valid ||
+                           priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+                               continue;
+
+                       pe->index = tid_aux;
+                       mvpp2_prs_hw_read(priv, pe);
+                       ri_bits = mvpp2_prs_sram_ri_get(pe);
+                       ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+                       if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+                           ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+                               break;
+               }
+
+               /* Double vlan entries must stay above single/triple ones */
+               if (tid >= tid_aux) {
+                       /* Don't leak the entry allocated above */
+                       ret = -ERANGE;
+                       goto error;
+               }
+
+               memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               pe->index = tid;
+
+               priv->prs_double_vlans[ai] = true;
+
+               mvpp2_prs_match_etype(pe, 0, tpid1);
+               mvpp2_prs_match_etype(pe, 4, tpid2);
+
+               mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               /* Shift 8 bytes - skip 2 vlan tags */
+               mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+               mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+                                        MVPP2_PRS_RI_VLAN_MASK);
+               mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+                                        MVPP2_PRS_SRAM_AI_MASK);
+
+               mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+       }
+
+       /* Update ports' mask */
+       mvpp2_prs_tcam_port_map_set(pe, port_map);
+       mvpp2_prs_hw_write(priv, pe);
+
+error:
+       kfree(pe);
+       return ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset
+ *
+ * Installs two tcam entries for @proto: one matching fragmented packets
+ * and one matching non-fragmented packets.  Both program @ri/@ri_mask
+ * and the L4 offset.  Only TCP, UDP and IGMP are accepted; anything
+ * else returns -EINVAL.  Returns a negative value when no free tcam
+ * slot is available.
+ */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
+                              unsigned int ri, unsigned int ri_mask)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+           (proto != IPPROTO_IGMP))
+               return -EINVAL;
+
+       /* Fragmented packet */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = tid;
+
+       /* Set next lu to IPv4 */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       /* NOTE(review): 12-byte shift - presumably skips the remainder of
+        * the IPv4 header from the current parse offset; confirm against
+        * the parser programming model
+        */
+       mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L4 offset (iphdr size minus 4 - assumes the parse offset is
+        * already 4 bytes into the header; TODO confirm)
+        */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct iphdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Flag the packet as fragmented in the result info */
+       mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
+                                ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+       /* Match @proto in tcam data byte 5 */
+       mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Not fragmented packet - reuse pe, patching only what differs */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+       /* Additionally require tcam data bytes 2-3 to be zero - presumably
+        * the IPv4 flags/fragment-offset field; TODO confirm
+        */
+       mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+       mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* IPv4 L3 multicast or broadcast.
+ * Returns 0 on success, -EINVAL for an unsupported cast type, or a
+ * negative value when no free tcam slot is available.
+ */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, i;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(pe));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = tid;
+
+       switch (l3_cast) {
+       case MVPP2_PRS_L3_MULTI_CAST:
+               /* Match the IPv4 multicast prefix in the first byte */
+               mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+                                            MVPP2_PRS_IPV4_MC_MASK);
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+                                        MVPP2_PRS_RI_L3_ADDR_MASK);
+               break;
+       case MVPP2_PRS_L3_BROAD_CAST:
+               /* Broadcast: all four address bytes must be all-ones */
+               for (i = 0; i < 4; i++)
+                       mvpp2_prs_tcam_data_byte_set(&pe, i,
+                                                    MVPP2_PRS_IPV4_BC_MASK,
+                                                    MVPP2_PRS_IPV4_BC_MASK);
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+                                        MVPP2_PRS_RI_L3_ADDR_MASK);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Set entries for protocols over IPv6
+ *
+ * Installs one tcam entry matching @proto and programs @ri/@ri_mask plus
+ * the L4 offset.  Only TCP, UDP, ICMPv6 and IPIP are accepted; anything
+ * else returns -EINVAL.  Returns a negative value when no free tcam
+ * slot is available.
+ */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
+                              unsigned int ri, unsigned int ri_mask)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+           (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+               return -EINVAL;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+       /* Set L4 offset (ipv6hdr size minus 6 - assumes the parse offset
+        * is already 6 bytes into the header; TODO confirm)
+        */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct ipv6hdr) - 6,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Match @proto in the first tcam data byte */
+       mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+       /* Require the IPV6_NO_EXT ai bit to be set in the tcam */
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Write HW */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* IPv6 L3 multicast entry
+ *
+ * Only MVPP2_PRS_L3_MULTI_CAST is supported; anything else returns
+ * -EINVAL.  Returns a negative value when no free tcam slot is
+ * available.
+ */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+               return -EINVAL;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Stay in the IPv6 lookup stage for the next iteration
+        * (NOTE(review): the previous comment said "go to flowid
+        * generation", which does not match the LU_IP6 argument below)
+        */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Shift back to IPv6 NH */
+       mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       /* Match the IPv6 multicast prefix in the first tcam data byte */
+       mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+                                    MVPP2_PRS_IPV6_MC_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
+                                  int lu_max, int offset)
+{
+       u32 reg;
+
+       /* Program the first lookup ID used for this port */
+       reg = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+       reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
+       reg |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+       mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, reg);
+
+       /* Cap the number of lookup iterations for this port's packets */
+       reg = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+       reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+       reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+       mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), reg);
+
+       /* Initial header extraction offset used by the first searching
+        * loop
+        */
+       reg = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+       reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+       reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+       mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), reg);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int port;
+
+       for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+               memset(&pe, 0, sizeof(pe));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+               /* One dedicated flow entry per port, allocated downwards */
+               pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* The port number doubles as the default flow ID */
+               mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+               mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+               /* Update shadow table and hw entry */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+               mvpp2_prs_hw_write(priv, &pe);
+       }
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       memset(&pe, 0, sizeof(pe));
+
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+       pe.index = MVPP2_PE_MH_DEFAULT;
+
+       /* Skip the Marvell Header and continue with the MAC lookup */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (place holders) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+       /* Non-promiscuous mode for all ports - DROP unknown packets */
+       pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Mark the packet to be dropped and finish the lookup */
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_DROP_MASK);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* place holders only - no ports */
+       mvpp2_prs_mac_drop_all_set(priv, 0, false);
+       mvpp2_prs_mac_promisc_set(priv, 0, false);
+       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
+       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       /* None tagged EDSA entry - place holder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+                             MVPP2_PRS_EDSA);
+
+       /* Tagged EDSA entry - place holder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+       /* None tagged DSA entry - place holder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+                             MVPP2_PRS_DSA);
+
+       /* Tagged DSA entry - place holder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+       /* None tagged EDSA ethertype entry - place holder */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+                                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+       /* Tagged EDSA ethertype entry - place holder */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+                                       MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+       /* None tagged DSA ethertype entry */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+                                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+       /* Tagged DSA ethertype entry */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+                                       MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+       /* Set default entry, in case DSA or EDSA tag not found */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+       pe.index = MVPP2_PE_DSA_DEFAULT;
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+       /* Shift 0 bytes */
+       mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* NOTE(review): shadow lookup recorded as LU_MAC while the tcam
+        * lookup above is LU_DSA - confirm this is intentional
+        */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+       /* Clear all sram ai bits for next iteration */
+       mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       /* Ethertype: PPPoE */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+                                MVPP2_PRS_RI_PPPOE_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+                               MVPP2_PRS_RI_PPPOE_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: ARP */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+       /* Generate flow in the next iteration*/
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: LBTD */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+       /* Generate flow in the next iteration*/
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                MVPP2_PRS_RI_CPU_CODE_MASK |
+                                MVPP2_PRS_RI_UDF3_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                               MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                               MVPP2_PRS_RI_CPU_CODE_MASK |
+                               MVPP2_PRS_RI_UDF3_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv4 without options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+                                    MVPP2_PRS_IPV4_HEAD_MASK |
+                                    MVPP2_PRS_IPV4_IHL_MASK);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IP header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv4 with options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+
+       /* Clear tcam data before updating */
+       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD,
+                                    MVPP2_PRS_IPV4_HEAD_MASK);
+
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv6 without options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+       /* Skip DIP of IPV6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+                                MVPP2_MAX_L3_ADDR_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Generate flow in the next iteration*/
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset even it's unknown L3 */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ * Returns 0 on success or a negative error code if a parser entry could
+ * not be added or the double-vlan shadow array could not be allocated.
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int err;
+
+       /* One bool per possible double-vlan pair.  devm_kcalloc() takes
+        * (count, element size) like calloc(); the original call had these
+        * two arguments swapped (same total size, wrong convention).
+        */
+       priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
+                                             MVPP2_PRS_DBL_VLANS_MAX,
+                                             sizeof(bool), GFP_KERNEL);
+       if (!priv->prs_double_vlans)
+               return -ENOMEM;
+
+       /* Double VLAN: 0x8100, 0x88A8 */
+       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+                                       MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Double VLAN: 0x8100, 0x8100 */
+       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
+                                       MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Single VLAN: 0x88a8 */
+       err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+                                MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Single VLAN: 0x8100 */
+       err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+                                MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Set default double vlan entry */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+       pe.index = MVPP2_PE_VLAN_DBL;
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+       /* Clear ai for next iterations */
+       mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+                                MVPP2_PRS_RI_VLAN_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+                                MVPP2_PRS_DBL_VLAN_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Set default vlan none entry */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+       pe.index = MVPP2_PE_VLAN_NONE;
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                MVPP2_PRS_RI_VLAN_MASK);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Set entries for PPPoE ethertype: IPv4 (with/without options), IPv6 and
+ * a catch-all non-IP entry, all under the MVPP2_PRS_LU_PPPOE lookup.
+ */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       /* IPv4 over PPPoE with options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IP header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* IPv4 over PPPoE without options.
+        * Note: pe is deliberately NOT re-zeroed - this entry reuses the
+        * lookup/shift/offset setup of the "with options" entry above and
+        * only narrows the TCAM match (version/IHL byte) and rewrites the
+        * result info.
+        */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+                                    MVPP2_PRS_IPV4_HEAD_MASK |
+                                    MVPP2_PRS_IPV4_IHL_MASK);
+
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* IPv6 over PPPoE */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IPv6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Non-IP over PPPoE: no etype match, so this entry catches whatever
+        * the IPv4/IPv6 entries above did not.
+        */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       /* Set L3 offset even if it's unknown L3 */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Initialize entries for IPv4: per-protocol entries (TCP/UDP/IGMP),
+ * broadcast/multicast classification and the default unknown-protocol
+ * and unicast-address entries.
+ */
+static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int err;
+
+       /* Set entries for TCP, UDP and IGMP over IPv4 */
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       /* IGMP frames are marked for the CPU via special code / UDF3 bits */
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
+                                 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                 MVPP2_PRS_RI_CPU_CODE_MASK |
+                                 MVPP2_PRS_RI_UDF3_MASK);
+       if (err)
+               return err;
+
+       /* IPv4 Broadcast */
+       err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
+       if (err)
+               return err;
+
+       /* IPv4 Multicast */
+       err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+       if (err)
+               return err;
+
+       /* Default IPv4 entry for unknown protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+       /* Set next lu to IPv4 */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       /* Shift by 12 - presumably advances the parse point to the DIP
+        * field for the follow-up IPv4 lookup; TODO confirm the offset
+        * arithmetic against the PPv2 parser documentation.
+        */
+       mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L4 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct iphdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv4 entry for unicast address */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Initialize entries for IPv6: per-protocol entries (TCP/UDP/ICMPv6),
+ * DS-Lite (IPv4-in-IPv6), multicast, hop-limit drop and the default
+ * unknown-protocol / unknown-ext / unicast-address entries.
+ */
+static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, err;
+
+       /* Set entries for TCP, UDP and ICMP over IPv6 */
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
+                                 MVPP2_PRS_RI_L4_TCP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
+                                 MVPP2_PRS_RI_L4_UDP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       /* ICMPv6 frames are marked for the CPU via special code/UDF3 bits */
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
+                                 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                 MVPP2_PRS_RI_CPU_CODE_MASK |
+                                 MVPP2_PRS_RI_UDF3_MASK);
+       if (err)
+               return err;
+
+       /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
+       /* Result Info: UDF7=1, DS lite */
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
+                                 MVPP2_PRS_RI_UDF7_IP6_LITE,
+                                 MVPP2_PRS_RI_UDF7_MASK);
+       if (err)
+               return err;
+
+       /* IPv6 multicast */
+       err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+       if (err)
+               return err;
+
+       /* Entry for checking hop limit: matches hop-limit byte == 0 and
+        * marks the frame for drop.
+        */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+                                MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_L3_PROTO_MASK |
+                                MVPP2_PRS_RI_DROP_MASK);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+       /* Update shadow table and hw entry.
+        * NOTE(review): the shadow LU is recorded as MVPP2_PRS_LU_IP4 even
+        * though this entry is looked up under MVPP2_PRS_LU_IP6; the same
+        * pattern repeats for the two default entries below while the last
+        * (unicast) entry uses MVPP2_PRS_LU_IP6 - verify this is intended.
+        */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unknown protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+       /* Set L4 offset relatively to our current place */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct ipv6hdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry (see LU_IP4 NOTE above) */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unknown ext protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry (see LU_IP4 NOTE above) */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unicast address */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+       /* Finished: go to IPv6 again */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Shift back to IPV6 NH */
+       mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Parser default initialization: enable the TCAM, wipe and invalidate all
+ * entries, allocate the software shadow table, then install the default
+ * per-stage lookup entries (MH, MAC, DSA, ethertype, VLAN, PPPoE, IP).
+ */
+static int mvpp2_prs_default_init(struct platform_device *pdev,
+                                 struct mvpp2 *priv)
+{
+       int err, index, i;
+
+       /* Enable tcam table */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+       /* Clear all tcam and sram entries */
+       for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+               mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+               for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+                       mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+               mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
+               for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+                       mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+       }
+
+       /* Invalidate all tcam entries */
+       for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+               mvpp2_prs_hw_inv(priv, index);
+
+       /* The shadow table mirrors TCAM occupancy in software; the *_init
+        * helpers below rely on it, so it must be allocated first.
+        */
+       priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+                                       sizeof(struct mvpp2_prs_shadow),
+                                       GFP_KERNEL);
+       if (!priv->prs_shadow)
+               return -ENOMEM;
+
+       /* Always start from lookup = 0 */
+       for (index = 0; index < MVPP2_MAX_PORTS; index++)
+               mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+                                      MVPP2_PRS_PORT_LU_MAX, 0);
+
+       mvpp2_prs_def_flow_init(priv);
+
+       mvpp2_prs_mh_init(priv);
+
+       mvpp2_prs_mac_init(priv);
+
+       mvpp2_prs_dsa_init(priv);
+
+       err = mvpp2_prs_etype_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_vlan_init(pdev, priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_pppoe_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip6_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip4_init(priv);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/* Compare MAC DA with tcam entry data.
+ * Returns true when, for every byte, the entry's tcam mask equals the
+ * requested mask and the masked tcam byte equals the masked DA byte.
+ */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+                                      const u8 *da, unsigned char *mask)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               unsigned char byte, byte_mask;
+
+               mvpp2_prs_tcam_data_byte_get(pe, i, &byte, &byte_mask);
+               if (byte_mask != mask[i] ||
+                   (byte_mask & byte) != (da[i] & mask[i]))
+                       return false;
+       }
+
+       return true;
+}
+
+/* Find tcam entry with matched pair <MAC DA, port>.
+ * Returns a heap-allocated entry holding the matching TCAM data - the
+ * caller owns it and must kfree() it - or NULL if no entry matches (or
+ * the allocation failed).
+ */
+static struct mvpp2_prs_entry *
+mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+                           unsigned char *mask, int udf_type)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+
+       /* Go through all the entries with MVPP2_PRS_LU_MAC */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int entry_pmap;
+
+               /* Skip slots that are free or belong to another lookup/udf */
+               if (!priv->prs_shadow[tid].valid ||
+                   (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+                   (priv->prs_shadow[tid].udf != udf_type))
+                       continue;
+
+               pe->index = tid;
+               mvpp2_prs_hw_read(priv, pe);
+               entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
+
+               /* Both the DA/mask and the exact port map must match */
+               if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+                   entry_pmap == pmap)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Update parser's mac da entry.
+ * @priv: ppv2 controller instance
+ * @port: port whose bit is set/cleared in the entry's port map
+ * @da:   MAC destination address to match
+ * @add:  true to accept @da on @port, false to stop accepting it
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if an add
+ * request somehow yields an empty port map, or a negative value from
+ * mvpp2_prs_tcam_first_free() when no free TCAM slot exists.
+ * (Originally these error paths returned a bare -1; proper errno codes
+ * are used now, which stays compatible with callers testing non-zero.)
+ */
+static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
+                                  const u8 *da, bool add)
+{
+       struct mvpp2_prs_entry *pe;
+       unsigned int pmap, len, ri;
+       unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+       int tid;
+
+       /* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+       pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
+                                        MVPP2_PRS_UDF_MAC_DEF);
+
+       /* No such entry */
+       if (!pe) {
+               if (!add)
+                       return 0;
+
+               /* Create new TCAM entry */
+               /* Find first range mac entry */
+               for (tid = MVPP2_PE_FIRST_FREE_TID;
+                    tid <= MVPP2_PE_LAST_FREE_TID; tid++)
+                       if (priv->prs_shadow[tid].valid &&
+                           (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
+                           (priv->prs_shadow[tid].udf ==
+                                                      MVPP2_PRS_UDF_MAC_RANGE))
+                               break;
+
+               /* Go through the all entries from first to last */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                               tid - 1);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+               pe->index = tid;
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(pe, port, add);
+
+       /* Invalidate the entry if no ports are left enabled */
+       pmap = mvpp2_prs_tcam_port_map_get(pe);
+       if (pmap == 0) {
+               if (add) {
+                       /* An add must never produce an empty port map */
+                       kfree(pe);
+                       return -EINVAL;
+               }
+               mvpp2_prs_hw_inv(priv, pe->index);
+               priv->prs_shadow[pe->index].valid = false;
+               kfree(pe);
+               return 0;
+       }
+
+       /* Continue - set next lookup */
+       mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+       /* Set match on DA */
+       len = ETH_ALEN;
+       while (len--)
+               mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+       /* Set result info bits */
+       if (is_broadcast_ether_addr(da))
+               ri = MVPP2_PRS_RI_L2_BCAST;
+       else if (is_multicast_ether_addr(da))
+               ri = MVPP2_PRS_RI_L2_MCAST;
+       else
+               ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+
+       mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+                                MVPP2_PRS_RI_MAC_ME_MASK);
+       mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+                               MVPP2_PRS_RI_MAC_ME_MASK);
+
+       /* Shift to ethertype */
+       mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       /* Update shadow table and hw entry */
+       priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+       mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, pe);
+
+       kfree(pe);
+
+       return 0;
+}
+
+/* Replace the port's accepted unicast MAC address: drop the parser entry
+ * for the current dev_addr, install one for @da, then record @da on the
+ * net_device.  Returns 0 on success or a negative error code.
+ */
+static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int ret;
+
+       /* Drop the parser entry matching the current address */
+       ret = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
+                                     false);
+       if (ret)
+               return ret;
+
+       /* Install a parser entry for the new address */
+       ret = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
+       if (ret)
+               return ret;
+
+       /* Record the new address on the net_device */
+       ether_addr_copy(dev->dev_addr, da);
+
+       return 0;
+}
+
+/* Delete all port's multicast simple (not range) entries */
+static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
+{
+       struct mvpp2_prs_entry pe;
+       int index, tid;
+
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+               /* Skip slots that are free or not default-UDF MAC entries */
+               if (!priv->prs_shadow[tid].valid ||
+                   (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+                   (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+                       continue;
+
+               /* Only simple mac entries; pe needs no memset because
+                * mvpp2_prs_hw_read() fills it for the given index.
+                */
+               pe.index = tid;
+               mvpp2_prs_hw_read(priv, &pe);
+
+               /* Read mac addr from entry */
+               for (index = 0; index < ETH_ALEN; index++)
+                       mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+                                                    &da_mask[index]);
+
+               /* Multicast but not broadcast - broadcast stays installed */
+               if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
+                       /* Delete this entry */
+                       mvpp2_prs_mac_da_accept(priv, port, da, false);
+       }
+}
+
+/* Set the DSA/EDSA tagging mode of a port by adding it to the relevant
+ * tagged/untagged parser entries and removing it from the others.
+ * Returns 0 on success, -EINVAL for an out-of-range tag type.
+ */
+static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+{
+       switch (type) {
+       case MVPP2_TAG_TYPE_EDSA:
+               /* Add port to EDSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               /* Remove port from DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               break;
+
+       case MVPP2_TAG_TYPE_DSA:
+               /* Add port to DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               /* Remove port from EDSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               break;
+
+       case MVPP2_TAG_TYPE_MH:
+       case MVPP2_TAG_TYPE_NONE:
+               /* Remove port from EDSA and DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               break;
+
+       default:
+               /* Only reject types outside the known range; any other
+                * in-range value is silently accepted as a no-op.
+                */
+               if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Set the default parser flow entry for the port.
+ *
+ * Looks up the flow entry for port->id; if none exists yet, a free TCAM
+ * slot is claimed and a new entry is initialized. In both cases the port
+ * is added to the entry's port map and the entry is committed to HW.
+ *
+ * Returns 0 on success, or a negative errno (no free TCAM slot / -ENOMEM).
+ */
+static int mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = mvpp2_prs_flow_find(port->priv, port->id);
+
+       /* No such entry yet - create one */
+       if (!pe) {
+               /* Go through all the entries from last to first */
+               tid = mvpp2_prs_tcam_first_free(port->priv,
+                                               MVPP2_PE_LAST_FREE_TID,
+                                              MVPP2_PE_FIRST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+               pe->index = tid;
+
+               /* Set flow ID*/
+               mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
+       }
+
+       /* Add the port, write to HW, then free the temporary entry
+        * (whether found or freshly allocated).
+        */
+       mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
+       mvpp2_prs_hw_write(port->priv, pe);
+       kfree(pe);
+
+       return 0;
+}
+
+/* Classifier configuration routines */
+
+/* Update classification flow table registers.
+ *
+ * The flow table is accessed indirectly: the entry index must be latched
+ * into the index register before the three data words are written, so
+ * the write order below is significant.
+ */
+static void mvpp2_cls_flow_write(struct mvpp2 *priv,
+                                struct mvpp2_cls_flow_entry *fe)
+{
+       mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
+}
+
+/* Update classification lookup table register.
+ *
+ * Indirect access: the {way, lkpid} selector is written to the index
+ * register first, then the entry data - keep this order.
+ */
+static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
+                                  struct mvpp2_cls_lookup_entry *le)
+{
+       u32 val;
+
+       val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+       mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+       mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Classifier default initialization: enable the classifier and zero
+ * both the flow table and the (two-way) lookup table.
+ */
+static void mvpp2_cls_init(struct mvpp2 *priv)
+{
+       struct mvpp2_cls_lookup_entry le;
+       struct mvpp2_cls_flow_entry fe;
+       int index;
+
+       /* Enable classifier */
+       mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+       /* Clear classifier flow table.
+        * Note: MVPP2_CLS_FLOWS_TBL_DATA_WORDS counts u32 words while
+        * memset() takes a byte count, so use sizeof() to clear the
+        * whole data array instead of only its first few bytes.
+        */
+       memset(&fe.data, 0, sizeof(fe.data));
+       for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+               fe.index = index;
+               mvpp2_cls_flow_write(priv, &fe);
+       }
+
+       /* Clear classifier lookup table: both ways of every lookup id */
+       le.data = 0;
+       for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+               le.lkpid = index;
+               le.way = 0;
+               mvpp2_cls_lookup_write(priv, &le);
+
+               le.way = 1;
+               mvpp2_cls_lookup_write(priv, &le);
+       }
+}
+
+/* Default classifier setup for one port: select way 0 for the port and
+ * install a lookup entry that steers packets to the port's first rxq
+ * with the classification engines disabled.
+ */
+static void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+       struct mvpp2_cls_lookup_entry le;
+       u32 val;
+
+       /* Set way for the port (clearing the bit selects way 0) */
+       val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+       val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
+
+       /* Pick the entry to be accessed in lookup ID decoding table
+        * according to the way and lkpid.
+        */
+       le.lkpid = port->id;
+       le.way = 0;
+       le.data = 0;
+
+       /* Set initial CPU queue for receiving packets */
+       le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+       le.data |= port->first_rxq;
+
+       /* Disable classification engines */
+       le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+       /* Update lookup ID table entry */
+       mvpp2_cls_lookup_write(port->priv, &le);
+}
+
+/* Set CPU queue number for oversize packets.
+ *
+ * The rxq number is split across two registers (low bits and the
+ * remaining high bits), then SW forwarding control is enabled for
+ * the port.
+ */
+static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+                   port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+       mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
+                   (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+       val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
+       val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create a BM pool: allocate its DMA-coherent descriptor area, program
+ * the pool base/size registers, start the pool and reset the software
+ * bookkeeping fields.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure or if the
+ * coherent buffer does not meet the HW alignment requirement.
+ */
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+                               struct mvpp2 *priv,
+                               struct mvpp2_bm_pool *bm_pool, int size)
+{
+       int size_bytes;
+       u32 val;
+
+       /* Each pool slot holds one u32 buffer pointer */
+       size_bytes = sizeof(u32) * size;
+       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+                                               &bm_pool->phys_addr,
+                                               GFP_KERNEL);
+       if (!bm_pool->virt_addr)
+               return -ENOMEM;
+
+       /* NOTE(review): the (u32) pointer cast assumes a 32-bit
+        * virtual address space - verify if this driver is ever built
+        * for a 64-bit platform.
+        */
+       if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+               dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+                                 bm_pool->phys_addr);
+               dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+                       bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+               return -ENOMEM;
+       }
+
+       mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+                   bm_pool->phys_addr);
+       mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+       /* Start the pool */
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+       val |= MVPP2_BM_START_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+       /* Pool starts unused and empty */
+       bm_pool->type = MVPP2_BM_FREE;
+       bm_pool->size = size;
+       bm_pool->pkt_size = 0;
+       bm_pool->buf_num = 0;
+       atomic_set(&bm_pool->in_use, 0);
+       spin_lock_init(&bm_pool->lock);
+
+       return 0;
+}
+
+/* Program the per-pool buffer size register.
+ *
+ * The hardware expects the size rounded up to a multiple of
+ * 1 << MVPP2_POOL_BUF_SIZE_OFFSET; the unrounded value is kept in the
+ * software pool state.
+ */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+                                     struct mvpp2_bm_pool *bm_pool,
+                                     int buf_size)
+{
+       u32 aligned_size = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+
+       bm_pool->buf_size = buf_size;
+       mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), aligned_size);
+}
+
+/* Free "num" buffers from the pool (capped at the current buffer count).
+ *
+ * Buffers are drained from HW via the indirect alloc registers and
+ * their skbs freed. Returns the number of buffers actually freed and
+ * updates bm_pool->buf_num accordingly.
+ *
+ * NOTE(review): the skbs were dma_map_single()'d when added to the
+ * pool but are freed here without a matching dma_unmap - confirm the
+ * unmap is handled elsewhere. Also, the u32 vaddr round-trip assumes
+ * 32-bit pointers.
+ */
+static int mvpp2_bm_bufs_free(struct mvpp2 *priv,
+                             struct mvpp2_bm_pool *bm_pool, int num)
+{
+       int i;
+
+       if (num >= bm_pool->buf_num)
+               /* Free all buffers from the pool */
+               num = bm_pool->buf_num;
+
+       for (i = 0; i < num; i++) {
+               u32 vaddr;
+
+               /* Get buffer virtual adress (indirect access): reading the
+                * phys alloc register latches the matching virtual address.
+                */
+               mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+               vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+               if (!vaddr)
+                       break;
+               dev_kfree_skb_any((struct sk_buff *)vaddr);
+       }
+
+       /* Update BM driver with number of buffers removed from pool */
+       bm_pool->buf_num -= i;
+       return i;
+}
+
+/* Cleanup pool: free all its buffers, stop the pool in HW and release
+ * the DMA-coherent pointer area.
+ *
+ * Returns 0 (a WARN is raised and the pool left allocated if not all
+ * buffers could be freed).
+ */
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+                                struct mvpp2 *priv,
+                                struct mvpp2_bm_pool *bm_pool)
+{
+       int num;
+       u32 val;
+
+       num = mvpp2_bm_bufs_free(priv, bm_pool, bm_pool->buf_num);
+       if (num != bm_pool->buf_num) {
+               WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+               return 0;
+       }
+
+       /* Stop the pool before releasing its backing memory */
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+       val |= MVPP2_BM_STOP_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+       dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+                         bm_pool->virt_addr,
+                         bm_pool->phys_addr);
+       return 0;
+}
+
+/* Create every BM pool with the maximum size and a zero buffer size.
+ * On failure, already-created pools are torn down in reverse order.
+ */
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+                              struct mvpp2 *priv)
+{
+       struct mvpp2_bm_pool *bm_pool;
+       int size = MVPP2_BM_POOL_SIZE_MAX;
+       int i, err;
+
+       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+               bm_pool = &priv->bm_pools[i];
+               bm_pool->id = i;
+               err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+               if (err)
+                       goto err_unroll_pools;
+               mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
+       }
+       return 0;
+
+err_unroll_pools:
+       dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+       /* Undo the pools created so far, newest first */
+       while (--i >= 0)
+               mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+       return err;
+}
+
+/* Buffer Manager global init: quiesce per-pool interrupts, then
+ * allocate and initialize all BM pools.
+ */
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       int pool, err;
+
+       for (pool = 0; pool < MVPP2_BM_POOLS_NUM; pool++) {
+               /* Mask BM all interrupts */
+               mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(pool), 0);
+               /* Clear BM cause register */
+               mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(pool), 0);
+       }
+
+       /* Allocate and initialize BM pools (devm: freed with the device) */
+       priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+                                     sizeof(struct mvpp2_bm_pool),
+                                     GFP_KERNEL);
+       if (!priv->bm_pools)
+               return -ENOMEM;
+
+       err = mvpp2_bm_pools_init(pdev, priv);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/* Attach long pool to rxq.
+ *
+ * Read-modify-write of the physical rxq config register: only the
+ * long-pool field is replaced.
+ */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+                                   int lrxq, int long_pool)
+{
+       u32 val;
+       int prxq;
+
+       /* Get queue physical ID */
+       prxq = port->rxqs[lrxq]->id;
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+       val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
+                   MVPP2_RXQ_POOL_LONG_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq.
+ *
+ * Same read-modify-write pattern as mvpp2_rxq_long_pool_set(), but on
+ * the short-pool field of the rxq config register.
+ */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+                                    int lrxq, int short_pool)
+{
+       u32 val;
+       int prxq;
+
+       /* Get queue physical ID */
+       prxq = port->rxqs[lrxq]->id;
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+       val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
+                   MVPP2_RXQ_POOL_SHORT_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Allocate an skb for a BM pool and DMA-map its data area.
+ *
+ * On success, returns the skb and stores the DMA address of skb->head
+ * in *buf_phys_addr. Returns NULL on allocation or mapping failure
+ * (the skb is freed in the latter case).
+ */
+static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
+                                      struct mvpp2_bm_pool *bm_pool,
+                                      dma_addr_t *buf_phys_addr,
+                                      gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+       dma_addr_t phys_addr;
+
+       skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
+       if (!skb)
+               return NULL;
+
+       /* Map the full receive buffer for device writes */
+       phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
+                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+                                   DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+       *buf_phys_addr = phys_addr;
+
+       return skb;
+}
+
+/* Encode a pool number into a BM cookie.
+ *
+ * The pool id occupies the 8 bits at MVPP2_BM_COOKIE_POOL_OFFS; all
+ * other cookie bits are preserved.
+ */
+static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
+{
+       u32 pool_bits = (pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS;
+       u32 cleared = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+
+       return cleared | pool_bits;
+}
+
+/* Extract the 8-bit pool number encoded in a BM cookie */
+static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+{
+       u32 pool_field = cookie >> MVPP2_BM_COOKIE_POOL_OFFS;
+
+       return pool_field & 0xFF;
+}
+
+/* Release buffer to BM.
+ *
+ * The virtual address is staged first; the write to the per-pool
+ * physical release register appears to commit the release, so the
+ * order of the two writes matters (NOTE(review): confirm against the
+ * PP2 datasheet).
+ */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
+                                    u32 buf_phys_addr, u32 buf_virt_addr)
+{
+       mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+       mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+}
+
+/* Release a multicast buffer: record the multicast id first, then
+ * return the buffer with the multicast flag set in its physical
+ * address.
+ */
+static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
+                                u32 buf_phys_addr, u32 buf_virt_addr,
+                                int mc_id)
+{
+       mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG,
+                   mc_id & MVPP2_BM_MC_ID_MASK);
+
+       mvpp2_bm_pool_put(port, pool,
+                         buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
+                         buf_virt_addr);
+}
+
+/* Return a buffer to the BM pool identified by the cookie @bm */
+static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+                             u32 phys_addr, u32 cookie)
+{
+       mvpp2_bm_pool_put(port, mvpp2_bm_cookie_pool_get(bm),
+                         phys_addr, cookie);
+}
+
+/* Allocate up to @buf_num buffers, DMA-map them and hand them to the
+ * pool. Returns the number of buffers actually added (0 if the request
+ * would overflow the pool).
+ *
+ * NOTE(review): both the dma_addr_t and the skb pointer are truncated
+ * to u32 before being handed to HW - this assumes 32-bit addresses.
+ */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
+                            struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+       struct sk_buff *skb;
+       int i, buf_size, total_size;
+       u32 bm;
+       dma_addr_t phys_addr;
+
+       buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+       total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+       /* Reject negative or pool-overflowing requests */
+       if (buf_num < 0 ||
+           (buf_num + bm_pool->buf_num > bm_pool->size)) {
+               netdev_err(port->dev,
+                          "cannot allocate %d buffers for pool %d\n",
+                          buf_num, bm_pool->id);
+               return 0;
+       }
+
+       bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
+       for (i = 0; i < buf_num; i++) {
+               skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+               if (!skb)
+                       break;
+
+               mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+       }
+
+       /* Update BM driver with number of buffers added to pool */
+       bm_pool->buf_num += i;
+       bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+       netdev_dbg(port->dev,
+                  "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+                  bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+                  bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+       netdev_dbg(port->dev,
+                  "%s pool %d: %d of %d buffers added\n",
+                  bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+                  bm_pool->id, i, buf_num);
+       return i;
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success.
+ *
+ * Claims pool @pool for @type, (re)populating its buffers when the pool
+ * is fresh or a long pool needs a larger packet size. Returns NULL when
+ * the pool is already used as a different type or buffer allocation
+ * does not complete fully.
+ */
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
+                 int pkt_size)
+{
+       unsigned long flags = 0;
+       struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+       int num;
+
+       if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
+               netdev_err(port->dev, "mixing pool types is forbidden\n");
+               return NULL;
+       }
+
+       /* Pool state is updated under the per-pool lock */
+       spin_lock_irqsave(&new_pool->lock, flags);
+
+       if (new_pool->type == MVPP2_BM_FREE)
+               new_pool->type = type;
+
+       /* Allocate buffers in case BM pool is used as long pool, but packet
+        * size doesn't match MTU or BM pool hasn't being used yet
+        */
+       if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
+           (new_pool->pkt_size == 0)) {
+               int pkts_num;
+
+               /* Set default buffer number or free all the buffers in case
+                * the pool is not empty
+                */
+               pkts_num = new_pool->buf_num;
+               if (pkts_num == 0)
+                       pkts_num = type == MVPP2_BM_SWF_LONG ?
+                                  MVPP2_BM_LONG_BUF_NUM :
+                                  MVPP2_BM_SHORT_BUF_NUM;
+               else
+                       mvpp2_bm_bufs_free(port->priv, new_pool, pkts_num);
+
+               new_pool->pkt_size = pkt_size;
+
+               /* Allocate buffers for this pool */
+               num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+               if (num != pkts_num) {
+                       WARN(1, "pool %d: %d of %d allocated\n",
+                            new_pool->id, num, pkts_num);
+                       /* We need to undo the bufs_add() allocations */
+                       spin_unlock_irqrestore(&new_pool->lock, flags);
+                       return NULL;
+               }
+       }
+
+       mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+                                 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+       spin_unlock_irqrestore(&new_pool->lock, flags);
+
+       return new_pool;
+}
+
+/* Initialize pools for swf (software forwarding).
+ *
+ * Lazily claims the port's long and short BM pools, marks the port in
+ * each pool's port map and attaches every rxq to both pools. Returns 0
+ * on success, -ENOMEM if either pool could not be claimed.
+ */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+       unsigned long flags = 0;
+       int rxq;
+
+       if (!port->pool_long) {
+               port->pool_long =
+                      mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
+                                        MVPP2_BM_SWF_LONG,
+                                        port->pkt_size);
+               if (!port->pool_long)
+                       return -ENOMEM;
+
+               /* Record this port as a user of the pool */
+               spin_lock_irqsave(&port->pool_long->lock, flags);
+               port->pool_long->port_map |= (1 << port->id);
+               spin_unlock_irqrestore(&port->pool_long->lock, flags);
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+       }
+
+       if (!port->pool_short) {
+               port->pool_short =
+                       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
+                                         MVPP2_BM_SWF_SHORT,
+                                         MVPP2_BM_SHORT_PKT_SIZE);
+               if (!port->pool_short)
+                       return -ENOMEM;
+
+               /* Record this port as a user of the pool */
+               spin_lock_irqsave(&port->pool_short->lock, flags);
+               port->pool_short->port_map |= (1 << port->id);
+               spin_unlock_irqrestore(&port->pool_short->lock, flags);
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       mvpp2_rxq_short_pool_set(port, rxq,
+                                                port->pool_short->id);
+       }
+
+       return 0;
+}
+
+/* Resize the port's long pool buffers for a new MTU.
+ *
+ * All buffers are drained, the pool packet size updated and the same
+ * number of buffers re-added at the new size. Returns 0 on success,
+ * -EIO if draining or refilling is incomplete (dev->mtu left
+ * unchanged in that case).
+ */
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_bm_pool *port_pool = port->pool_long;
+       int num, pkts_num = port_pool->buf_num;
+       int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+       /* Update BM pool with new buffer size */
+       num = mvpp2_bm_bufs_free(port->priv, port_pool, pkts_num);
+       if (num != pkts_num) {
+               WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
+               return -EIO;
+       }
+
+       port_pool->pkt_size = pkt_size;
+       num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
+       if (num != pkts_num) {
+               WARN(1, "pool %d: %d of %d allocated\n",
+                    port_pool->id, num, pkts_num);
+               return -EIO;
+       }
+
+       mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
+                                 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
+       dev->mtu = mtu;
+       netdev_update_features(dev);
+       return 0;
+}
+
+/* Enable the port's Rx/Tx interrupts on every present CPU */
+static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
+{
+       int mask = 0;
+       int cpu;
+
+       /* Build a bitmask with one bit per present CPU */
+       for_each_present_cpu(cpu)
+               mask |= 1 << cpu;
+
+       mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+                   MVPP2_ISR_ENABLE_INTERRUPT(mask));
+}
+
+/* Disable the port's Rx/Tx interrupts on every present CPU */
+static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
+{
+       int mask = 0;
+       int cpu;
+
+       /* Build a bitmask with one bit per present CPU */
+       for_each_present_cpu(cpu)
+               mask |= 1 << cpu;
+
+       mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+                   MVPP2_ISR_DISABLE_INTERRUPT(mask));
+}
+
+/* Mask the current CPU's Rx/Tx interrupts.
+ *
+ * smp_call_function target; @arg is the struct mvpp2_port *. Writing 0
+ * to the per-port mask register blocks all interrupt causes.
+ */
+static void mvpp2_interrupts_mask(void *arg)
+{
+       struct mvpp2_port *port = arg;
+
+       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts.
+ *
+ * smp_call_function target; @arg is the struct mvpp2_port *. Enables
+ * the misc, Tx-occupied and Rx-occupied interrupt causes.
+ */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+       struct mvpp2_port *port = arg;
+
+       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
+                   (MVPP2_CAUSE_MISC_SUM_MASK |
+                    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
+                    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+}
+
+/* Port configuration routines */
+
+/* Configure the GMAC control register 2 according to the PHY interface
+ * mode: SGMII enables the PCS with in-band autonegotiation, RGMII sets
+ * the RGMII port bit; other modes leave the register untouched.
+ */
+static void mvpp2_port_mii_set(struct mvpp2_port *port)
+{
+       u32 ctrl2;
+
+       ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       switch (port->phy_interface) {
+       case PHY_INTERFACE_MODE_SGMII:
+               ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK |
+                        MVPP2_GMAC_INBAND_AN_MASK;
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+               ctrl2 |= MVPP2_GMAC_PORT_RGMII_MASK;
+               break;
+       default:
+               break;
+       }
+
+       writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
+}
+
+/* Turn the port on and enable its MIB counters */
+static void mvpp2_port_enable(struct mvpp2_port *port)
+{
+       u32 ctrl0;
+
+       ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       ctrl0 |= MVPP2_GMAC_PORT_EN_MASK | MVPP2_GMAC_MIB_CNTR_EN_MASK;
+       writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Turn the port off (MIB counter enable bit is left as-is) */
+static void mvpp2_port_disable(struct mvpp2_port *port)
+{
+       u32 ctrl0;
+
+       ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       ctrl0 &= ~MVPP2_GMAC_PORT_EN_MASK;
+       writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Disable periodic transmission of IEEE 802.3x Flow Control Xon packets */
+static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
+{
+       u32 ctrl1;
+
+       ctrl1 = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+       ctrl1 &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+       writel(ctrl1, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback for the port: GMII loopback is selected only at
+ * gigabit speed, PCS loopback only in SGMII mode.
+ */
+static void mvpp2_port_loopback_set(struct mvpp2_port *port)
+{
+       u32 ctrl1 = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+       if (port->speed == 1000)
+               ctrl1 |= MVPP2_GMAC_GMII_LB_EN_MASK;
+       else
+               ctrl1 &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+       if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               ctrl1 |= MVPP2_GMAC_PCS_LB_EN_MASK;
+       else
+               ctrl1 &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+       writel(ctrl1, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Take the port out of reset and busy-wait until HW clears the reset
+ * bit.
+ *
+ * NOTE(review): the poll loop has no timeout or cpu_relax(); if the
+ * hardware never deasserts the bit this spins forever.
+ */
+static void mvpp2_port_reset(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+                   ~MVPP2_GMAC_PORT_RESET_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+              MVPP2_GMAC_PORT_RESET_MASK)
+               continue;
+}
+
+/* Change maximum receive size of the port.
+ *
+ * The register field holds the size in units of 2 bytes, measured
+ * without the Marvell header, hence the "- MVPP2_MH_SIZE) / 2".
+ */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
+       val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+                   MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set defaults to the MVPP2 port: loopback (if requested), TX FIFO
+ * threshold, egress scheduler, per-queue bandwidth/token buckets, Rx
+ * snoop, and finally mask all interrupts.
+ */
+static void mvpp2_defaults_set(struct mvpp2_port *port)
+{
+       int tx_port_num, val, queue, ptxq, lrxq;
+
+       /* Configure port to loopback if needed */
+       if (port->flags & MVPP2_F_LOOPBACK)
+               mvpp2_port_loopback_set(port);
+
+       /* Update TX FIFO MIN Threshold */
+       val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+       val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+       /* Min. TX threshold must be less than minimal packet length */
+       val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+       writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+       /* Disable Legacy WRR, Disable EJP, Release from reset.
+        * The scheduler registers are indexed: the egress port number
+        * must be latched before the per-port accesses below.
+        */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+                   tx_port_num);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+       /* Close bandwidth for all queues */
+       for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+               ptxq = mvpp2_txq_phys(port->id, queue);
+               mvpp2_write(port->priv,
+                           MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+       }
+
+       /* Set refill period to 1 usec, refill tokens
+        * and bucket size to maximum
+        */
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
+                   port->priv->tclk / USEC_PER_SEC);
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
+       val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+       val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+       val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
+       val = MVPP2_TXP_TOKEN_SIZE_MAX;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+       /* Set MaximumLowLatencyPacketSize value to 256 */
+       mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
+                   MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+                   MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+       /* Enable Rx cache snoop */
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               queue = port->rxqs[lrxq]->id;
+               val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+               val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+                          MVPP2_SNOOP_BUF_HDR_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+       }
+
+       /* At default, mask all interrupts to all present cpus */
+       mvpp2_interrupts_disable(port);
+}
+
+/* Enable packet reception: clear the disable bit on every rxq of the
+ * port.
+ */
+static void mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+       int lrxq;
+
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               int prxq = port->rxqs[lrxq]->id;
+               u32 cfg = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+
+               cfg &= ~MVPP2_RXQ_DISABLE_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), cfg);
+       }
+}
+
+/* Disable packet reception: set the disable bit on every rxq of the
+ * port.
+ */
+static void mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+       int lrxq;
+
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               int prxq = port->rxqs[lrxq]->id;
+               u32 cfg = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+
+               cfg |= MVPP2_RXQ_DISABLE_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), cfg);
+       }
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts take descriptors from DRAM
+ *
+ * Only queues with an initialized descriptor ring are enabled.
+ */
+static void mvpp2_egress_enable(struct mvpp2_port *port)
+{
+       int tx_port_num = mvpp2_egress_port(port);
+       u32 qmap = 0;
+       int queue;
+
+       /* Build a bitmap of all initialized TX queues */
+       for (queue = 0; queue < txq_number; queue++) {
+               if (port->txqs[queue]->descs != NULL)
+                       qmap |= (1 << queue);
+       }
+
+       /* Latch the egress port, then issue the enable command */
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress queue
+ * - HW doesn't take descriptors from DRAM
+ *
+ * Issues a stop command for the currently enabled queues, then polls
+ * (1 ms steps, up to MVPP2_TX_DISABLE_TIMEOUT_MSEC) until all Tx
+ * activity has stopped; warns on timeout.
+ */
+static void mvpp2_egress_disable(struct mvpp2_port *port)
+{
+       u32 reg_data;
+       int delay;
+       int tx_port_num = mvpp2_egress_port(port);
+
+       /* Issue stop command for active channels only */
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+       reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+                   MVPP2_TXP_SCHED_ENQ_MASK;
+       if (reg_data != 0)
+               mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
+                           (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+       /* Wait for all Tx activity to terminate. */
+       delay = 0;
+       do {
+               if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+                       netdev_warn(port->dev,
+                                   "Tx stop timed out, status=0x%08x\n",
+                                   reg_data);
+                       break;
+               }
+               mdelay(1);
+               delay++;
+
+               /* Check port TX Command register that all
+                * Tx queues are stopped
+                */
+               reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+       } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets on the
+ * physical rxq @rxq_id.
+ */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+       u32 status = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+       return status & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots: the low field carries the processed (used)
+ * descriptors to retire, the high field the new free descriptors.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+                       int used_count, int free_count)
+{
+       u32 update;
+
+       update = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), update);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW.
+ *
+ * Advances the ring cursor and prefetches the descriptor after the one
+ * being returned.
+ */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+       int curr = rxq->next_desc_to_proc;
+
+       rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, curr);
+       prefetch(rxq->descs + rxq->next_desc_to_proc);
+
+       return rxq->descs + curr;
+}
+
+/* Set rx queue packet offset (given in bytes) for physical rxq @prxq */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
+                                int prxq, int offset)
+{
+       u32 val;
+
+       /* Convert offset from bytes to units of 32 bytes */
+       offset = offset >> 5;
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+       /* Offset is in units of 32 bytes */
+       val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+                   MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Obtain BM cookie information from descriptor */
+static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+{
+       int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                  MVPP2_RXD_BM_POOL_ID_OFFS;
+       int cpu = smp_processor_id();
+
+       /* Pack the BM pool id and the current CPU number, one byte each */
+       return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
+              ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
+                                      struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+
+       /* Indirect access: select the TXQ first, then read its
+        * pending counter.
+        */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+
+       return val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+       int tx_desc = txq->next_desc_to_proc;
+
+       /* Advance the ring index (with wrap-around) and return the
+        * current descriptor.
+        */
+       txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+       return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
+{
+       /* Aggregated access - the relevant TXQ number is carried in each
+        * TX descriptor itself, so only the pending count is written here.
+        */
+       mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ *
+ * Returns 0 when "num" descriptors fit, -ENOMEM otherwise.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+                                    struct mvpp2_tx_queue *aggr_txq, int num)
+{
+       if ((aggr_txq->count + num) > aggr_txq->size) {
+               /* Update number of occupied aggregated Tx descriptors
+                * from the per-CPU status register (each CPU owns its
+                * own aggregated queue).
+                */
+               int cpu = smp_processor_id();
+               u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+               aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+       }
+
+       if ((aggr_txq->count + num) > aggr_txq->size)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Reserved Tx descriptors allocation request.
+ * Returns the number of descriptors the HW actually granted, which
+ * may be less than "num".
+ */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+                                        struct mvpp2_tx_queue *txq, int num)
+{
+       u32 val;
+
+       /* Request "num" descriptors for this TXQ, then read back the
+        * granted amount from the result register.
+        */
+       val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+       mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+
+       val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+
+       return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission.
+ * If not, request chunk of reserved descriptors and check again.
+ *
+ * Returns 0 when at least "num" descriptors are reserved for this CPU,
+ * -ENOMEM otherwise.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+                                           struct mvpp2_tx_queue *txq,
+                                           struct mvpp2_txq_pcpu *txq_pcpu,
+                                           int num)
+{
+       int req, cpu, desc_count;
+
+       if (txq_pcpu->reserved_num >= num)
+               return 0;
+
+       /* Not enough descriptors reserved! Update the reserved descriptor
+        * count and check again.
+        */
+
+       desc_count = 0;
+       /* Compute total of used descriptors across all CPUs */
+       for_each_present_cpu(cpu) {
+               struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+               txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+               desc_count += txq_pcpu_aux->count;
+               desc_count += txq_pcpu_aux->reserved_num;
+       }
+
+       /* Always request at least one full chunk to amortize HW requests */
+       req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+       desc_count += req;
+
+       /* Refuse if granting "req" could starve the other CPUs' chunks */
+       if (desc_count >
+          (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+               return -ENOMEM;
+
+       txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+
+       /* OK, the descriptor count has been updated: check again. */
+       if (txq_pcpu->reserved_num < num)
+               return -ENOMEM;
+       return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+       /* NOTE(review): the wrap-around case rolls back to last_desc - 1,
+        * while mvpp2_txq_next_desc_get() wraps from last_desc to 0, which
+        * suggests the rollback target should be last_desc. Confirm against
+        * the HW queue indexing before changing.
+        */
+       if (txq->next_desc_to_proc == 0)
+               txq->next_desc_to_proc = txq->last_desc - 1;
+       else
+               txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation.
+ * l3_proto is the on-wire (big-endian) EtherType as taken from
+ * skb->protocol; l3_offs and ip_hdr_len are in bytes and 32-bit words
+ * respectively. Returns the command bits to OR into the descriptor.
+ */
+static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+                              int ip_hdr_len, int l4_proto)
+{
+       u32 command;
+
+       /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+        * G_L4_chk, L4_type required only for checksum calculation
+        */
+       command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+       command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+       command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+       /* NOTE(review): swab16(ETH_P_IP) equals the big-endian EtherType
+        * only on little-endian CPUs; on big-endian this comparison would
+        * never match - verify if BE support is intended.
+        */
+       if (l3_proto == swab16(ETH_P_IP)) {
+               command &= ~MVPP2_TXD_IP_CSUM_DISABLE;  /* enable IPv4 csum */
+               command &= ~MVPP2_TXD_L3_IP6;           /* enable IPv4 */
+       } else {
+               command |= MVPP2_TXD_L3_IP6;            /* enable IPv6 */
+       }
+
+       if (l4_proto == IPPROTO_TCP) {
+               command &= ~MVPP2_TXD_L4_UDP;           /* enable TCP */
+               command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
+       } else if (l4_proto == IPPROTO_UDP) {
+               command |= MVPP2_TXD_L4_UDP;            /* enable UDP */
+               command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
+       } else {
+               command |= MVPP2_TXD_L4_CSUM_NOT;
+       }
+
+       return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+                                          struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+
+       /* Reading status reg resets transmitted descriptor counter */
+       val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+
+       return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+               MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+/* Clear the "sent descriptors" counter of every TXQ of the port by
+ * reading it (the read resets the counter). Intended to run on each
+ * CPU via on_each_cpu(); "arg" is the struct mvpp2_port pointer.
+ */
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       int queue;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               int id = port->txqs[queue]->id;
+
+               mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+       }
+}
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+       u32     val, size, mtu;
+       int     txq, tx_port_num;
+
+       /* Convert pkt_size to token-bucket units (x8, presumably bits) -
+        * confirm against the PPv2 scheduler spec.
+        */
+       mtu = port->pkt_size * 8;
+       if (mtu > MVPP2_TXP_MTU_MAX)
+               mtu = MVPP2_TXP_MTU_MAX;
+
+       /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
+       mtu = 3 * mtu;
+
+       /* Indirect access to registers */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+       /* Set MTU */
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
+       val &= ~MVPP2_TXP_MTU_MAX;
+       val |= mtu;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
+
+       /* TXP token size and all TXQs token size must be larger that MTU */
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+       size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+       if (size < mtu) {
+               size = mtu;
+               val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+               val |= size;
+               mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+       }
+
+       /* Grow each TXQ's token size up to the (tripled) MTU as well */
+       for (txq = 0; txq < txq_number; txq++) {
+               val = mvpp2_read(port->priv,
+                                MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+               size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+               if (size < mtu) {
+                       size = mtu;
+                       val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+                       val |= size;
+                       mvpp2_write(port->priv,
+                                   MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+                                   val);
+               }
+       }
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+                                  struct mvpp2_rx_queue *rxq, u32 pkts)
+{
+       u32 val;
+
+       /* Indirect access: select the RXQ, then program the threshold */
+       val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+
+       /* Remember the value so it can be re-applied on re-init */
+       rxq->pkts_coal = pkts;
+}
+
+/* Set the time delay in usec before Rx interrupt */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+                                  struct mvpp2_rx_queue *rxq, u32 usec)
+{
+       u32 val;
+
+       /* Convert microseconds to core clock (tclk) cycles */
+       val = (port->priv->tclk / USEC_PER_SEC) * usec;
+       mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+
+       rxq->time_coal = usec;
+}
+
+/* Set threshold for TX_DONE pkts coalescing. Intended to run on each
+ * CPU via on_each_cpu(); "arg" is the struct mvpp2_port pointer.
+ */
+static void mvpp2_tx_done_pkts_coal_set(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       int queue;
+       u32 val;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+               val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
+                      MVPP2_TRANSMITTED_THRESH_MASK;
+               /* Indirect access: select the TXQ, then write the threshold */
+               mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+               mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
+       }
+}
+
+/* Free Tx queue skbuffs: unmap and release the first "num" buffers
+ * starting at the per-CPU get index.
+ */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+                               struct mvpp2_tx_queue *txq,
+                               struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+       int i;
+
+       for (i = 0; i < num; i++) {
+               struct mvpp2_tx_desc *tx_desc = txq->descs +
+                                                       txq_pcpu->txq_get_index;
+               struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+
+               mvpp2_txq_inc_get(txq_pcpu);
+
+               /* A NULL slot means the descriptor carried no skb to free */
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
+                                tx_desc->data_size, DMA_TO_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+}
+
+/* Map a non-empty Rx interrupt cause bitmap to the highest-numbered
+ * pending Rx queue (fls() finds the most significant set bit).
+ */
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+                                                       u32 cause)
+{
+       int queue = fls(cause) - 1;
+
+       return port->rxqs[queue];
+}
+
+/* Map a Tx interrupt cause bitmap (Tx bits occupy the upper 16 bits)
+ * to the highest-numbered pending Tx queue.
+ */
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+                                                       u32 cause)
+{
+       int queue = fls(cause >> 16) - 1;
+
+       return port->txqs[queue];
+}
+
+/* Handle end of transmission: release completed buffers and wake the
+ * netdev queue when enough room has been freed.
+ */
+static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+                          struct mvpp2_txq_pcpu *txq_pcpu)
+{
+       struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
+       int tx_done;
+
+       /* Per-CPU state: must run on the CPU that owns txq_pcpu */
+       if (txq_pcpu->cpu != smp_processor_id())
+               netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
+
+       tx_done = mvpp2_txq_sent_desc_proc(port, txq);
+       if (!tx_done)
+               return;
+       mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
+
+       txq_pcpu->count -= tx_done;
+
+       /* Wake the queue once a worst-case skb (all frags) fits again */
+       if (netif_tx_queue_stopped(nq))
+               if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
+                       netif_tx_wake_queue(nq);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ.
+ * Returns 0 on success, -ENOMEM if the DMA area cannot be allocated.
+ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+                              struct mvpp2_tx_queue *aggr_txq,
+                              int desc_num, int cpu,
+                              struct mvpp2 *priv)
+{
+       /* Allocate memory for TX descriptors */
+       aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+                               desc_num * MVPP2_DESC_ALIGNED_SIZE,
+                               &aggr_txq->descs_phys, GFP_KERNEL);
+       if (!aggr_txq->descs)
+               return -ENOMEM;
+
+       /* Make sure descriptor address is cache line size aligned  */
+       BUG_ON(aggr_txq->descs !=
+              PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       aggr_txq->last_desc = aggr_txq->size - 1;
+
+       /* Aggr TXQ no reset WA: resume from the index the HW reports
+        * instead of assuming 0.
+        */
+       aggr_txq->next_desc_to_proc = mvpp2_read(priv,
+                                                MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+
+       /* Set Tx descriptors queue starting address */
+       /* indirect access */
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
+                   aggr_txq->descs_phys);
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+
+       return 0;
+}
+
+/* Create a specified Rx queue: allocate its DMA descriptor area and
+ * program the HW with its address, size, offset and coalescing.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int mvpp2_rxq_init(struct mvpp2_port *port,
+                         struct mvpp2_rx_queue *rxq)
+
+{
+       rxq->size = port->rx_ring_size;
+
+       /* Allocate memory for RX descriptors */
+       rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
+                                       rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                       &rxq->descs_phys, GFP_KERNEL);
+       if (!rxq->descs)
+               return -ENOMEM;
+
+       /* Make sure descriptor address is cache line size aligned */
+       BUG_ON(rxq->descs !=
+              PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       rxq->last_desc = rxq->size - 1;
+
+       /* Zero occupied and non-occupied counters - direct access */
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+       /* Set Rx descriptors queue starting address - indirect access */
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+       mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+
+       /* Set Offset */
+       mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
+
+       /* Set coalescing pkts and time */
+       mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+       mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+
+       /* Add number of descriptors ready for receiving packets */
+       mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+       return 0;
+}
+
+/* Push packets received by the RXQ back to the BM pool (used when
+ * draining a queue before teardown).
+ */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+                               struct mvpp2_rx_queue *rxq)
+{
+       int rx_received, i;
+
+       rx_received = mvpp2_rxq_received(port, rxq->id);
+       if (!rx_received)
+               return;
+
+       for (i = 0; i < rx_received; i++) {
+               struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+               u32 bm = mvpp2_bm_cookie_build(rx_desc);
+
+               mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+                                 rx_desc->buf_cookie);
+       }
+       /* Mark all drained descriptors as both processed and free again */
+       mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue: drain pending packets, free the descriptor DMA
+ * area and clear the queue's HW registers.
+ */
+static void mvpp2_rxq_deinit(struct mvpp2_port *port,
+                            struct mvpp2_rx_queue *rxq)
+{
+       mvpp2_rxq_drop_pkts(port, rxq);
+
+       if (rxq->descs)
+               dma_free_coherent(port->dev->dev.parent,
+                                 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                 rxq->descs,
+                                 rxq->descs_phys);
+
+       rxq->descs             = NULL;
+       rxq->last_desc         = 0;
+       rxq->next_desc_to_proc = 0;
+       rxq->descs_phys        = 0;
+
+       /* Clear Rx descriptors queue starting address and size;
+        * free descriptor number
+        */
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Create and initialize a Tx queue: allocate the descriptor DMA area,
+ * program the HW (addresses, prefetch, WRR/EJP scheduling) and allocate
+ * the per-CPU skb tracking arrays.
+ * Returns 0 on success, -ENOMEM on any allocation failure; on failure
+ * everything allocated here is released again.
+ */
+static int mvpp2_txq_init(struct mvpp2_port *port,
+                         struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+       int cpu, desc, desc_per_txq, tx_port_num;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+
+       txq->size = port->tx_ring_size;
+
+       /* Allocate memory for Tx descriptors */
+       txq->descs = dma_alloc_coherent(port->dev->dev.parent,
+                               txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                               &txq->descs_phys, GFP_KERNEL);
+       if (!txq->descs)
+               return -ENOMEM;
+
+       /* Make sure descriptor address is cache line size aligned  */
+       BUG_ON(txq->descs !=
+              PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       txq->last_desc = txq->size - 1;
+
+       /* Set Tx descriptors queue starting address - indirect access */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
+                                            MVPP2_TXQ_DESC_SIZE_MASK);
+       mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
+       mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
+                   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+       val &= ~MVPP2_TXQ_PENDING_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+
+       /* Calculate base address in prefetch buffer. We reserve 16 descriptors
+        * for each existing TXQ.
+        * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
+        * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
+        */
+       desc_per_txq = 16;
+       desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
+              (txq->log_id * desc_per_txq);
+
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
+                   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+                   MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+
+       /* WRR / EJP configuration - indirect access */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+       val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+       val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+       val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+       val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
+
+       val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+                   val);
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               txq_pcpu->size = txq->size;
+               txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
+                                          sizeof(*txq_pcpu->tx_skb),
+                                          GFP_KERNEL);
+               if (!txq_pcpu->tx_skb)
+                       goto error;
+
+               txq_pcpu->count = 0;
+               txq_pcpu->reserved_num = 0;
+               txq_pcpu->txq_put_index = 0;
+               txq_pcpu->txq_get_index = 0;
+       }
+
+       return 0;
+
+error:
+       /* Free the per-CPU skb arrays allocated so far; the original code
+        * leaked them on failure. kfree(NULL) is a no-op for the CPUs not
+        * yet reached (assumes txq->pcpu came zeroed from alloc_percpu -
+        * confirm at the allocation site).
+        */
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+       }
+
+       dma_free_coherent(port->dev->dev.parent,
+                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                         txq->descs, txq->descs_phys);
+
+       return -ENOMEM;
+}
+
+/* Free allocated TXQ resources: per-CPU skb arrays, the descriptor DMA
+ * area, and the queue's HW configuration.
+ */
+static void mvpp2_txq_deinit(struct mvpp2_port *port,
+                            struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int cpu;
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+       }
+
+       if (txq->descs)
+               dma_free_coherent(port->dev->dev.parent,
+                                 txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq->descs, txq->descs_phys);
+
+       txq->descs             = NULL;
+       txq->last_desc         = 0;
+       txq->next_desc_to_proc = 0;
+       txq->descs_phys        = 0;
+
+       /* Set minimum bandwidth for disabled TXQs */
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+
+       /* Set Tx descriptors queue starting address and size */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+}
+
+/* Cleanup a Tx queue: drain it in HW (with a bounded wait), then
+ * release all still-pending buffers and reset the per-CPU state.
+ */
+static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int delay, pending, cpu;
+       u32 val;
+
+       /* Indirect access: select the TXQ, then enable draining */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+       val |= MVPP2_TXQ_DRAIN_EN_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+       /* The napi queue has been stopped so wait for all packets
+        * to be transmitted.
+        */
+       delay = 0;
+       do {
+               if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+                       netdev_warn(port->dev,
+                                   "port %d: cleaning queue %d timed out\n",
+                                   port->id, txq->log_id);
+                       break;
+               }
+               mdelay(1);
+               delay++;
+
+               pending = mvpp2_txq_pend_desc_num_get(port, txq);
+       } while (pending);
+
+       /* Drain done (or timed out): disable draining again */
+       val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+               /* Release all packets */
+               mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
+
+               /* Reset queue */
+               txq_pcpu->count = 0;
+               txq_pcpu->txq_put_index = 0;
+               txq_pcpu->txq_get_index = 0;
+       }
+}
+
+/* Cleanup all Tx queues of the port: flush the port in HW, clean and
+ * deinit each TXQ, then clear the sent counters on every CPU.
+ */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
+{
+       struct mvpp2_tx_queue *txq;
+       int queue;
+       u32 val;
+
+       val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
+
+       /* Reset Tx ports and delete Tx queues */
+       val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+
+       for (queue = 0; queue < txq_number; queue++) {
+               txq = port->txqs[queue];
+               mvpp2_txq_clean(port, txq);
+               mvpp2_txq_deinit(port, txq);
+       }
+
+       on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+
+       /* De-assert the port flush bit set above */
+       val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+}
+
+/* Cleanup all Rx queues of the port */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
+{
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++)
+               mvpp2_rxq_deinit(port, port->rxqs[queue]);
+}
+
+/* Init all Rx queues for port.
+ * Returns 0 on success; on failure all queues are torn down again and
+ * the error from the failing mvpp2_rxq_init() is returned.
+ */
+static int mvpp2_setup_rxqs(struct mvpp2_port *port)
+{
+       int queue, err;
+
+       for (queue = 0; queue < rxq_number; queue++) {
+               err = mvpp2_rxq_init(port, port->rxqs[queue]);
+               if (err)
+                       goto err_cleanup;
+       }
+       return 0;
+
+err_cleanup:
+       mvpp2_cleanup_rxqs(port);
+       return err;
+}
+
+/* Init all tx queues for port.
+ * Returns 0 on success; on failure all queues are torn down again and
+ * the error from the failing mvpp2_txq_init() is returned.
+ */
+static int mvpp2_setup_txqs(struct mvpp2_port *port)
+{
+       struct mvpp2_tx_queue *txq;
+       int queue, err;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               txq = port->txqs[queue];
+               err = mvpp2_txq_init(port, txq);
+               if (err)
+                       goto err_cleanup;
+       }
+
+       /* Program coalescing thresholds and clear sent counters per CPU */
+       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
+       on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+       return 0;
+
+err_cleanup:
+       mvpp2_cleanup_txqs(port);
+       return err;
+}
+
+/* The callback for per-port interrupt: mask further interrupts and
+ * defer the work to NAPI polling.
+ */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+       struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+
+       mvpp2_interrupts_disable(port);
+
+       napi_schedule(&port->napi);
+
+       return IRQ_HANDLED;
+}
+
+/* Adjust link: phylib adjust_link callback. Propagates PHY speed,
+ * duplex and link state changes to the GMAC and enables/disables
+ * traffic accordingly.
+ */
+static void mvpp2_link_event(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct phy_device *phydev = port->phy_dev;
+       int status_change = 0;
+       u32 val;
+
+       if (phydev->link) {
+               if ((port->speed != phydev->speed) ||
+                   (port->duplex != phydev->duplex)) {
+                       u32 val;
+
+                       /* Re-program forced speed/duplex from the PHY state */
+                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
+                                MVPP2_GMAC_CONFIG_GMII_SPEED |
+                                MVPP2_GMAC_CONFIG_FULL_DUPLEX |
+                                MVPP2_GMAC_AN_SPEED_EN |
+                                MVPP2_GMAC_AN_DUPLEX_EN);
+
+                       if (phydev->duplex)
+                               val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+                       if (phydev->speed == SPEED_1000)
+                               val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+                       else
+                               val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+                       port->duplex = phydev->duplex;
+                       port->speed  = phydev->speed;
+               }
+       }
+
+       if (phydev->link != port->link) {
+               if (!phydev->link) {
+                       port->duplex = -1;
+                       port->speed = 0;
+               }
+
+               port->link = phydev->link;
+               status_change = 1;
+       }
+
+       if (status_change) {
+               if (phydev->link) {
+                       /* NOTE(review): both FORCE_LINK_PASS and
+                        * FORCE_LINK_DOWN are ORed in here - setting both
+                        * force bits together looks suspicious; confirm
+                        * intended semantics against the GMAC register spec.
+                        */
+                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       val |= (MVPP2_GMAC_FORCE_LINK_PASS |
+                               MVPP2_GMAC_FORCE_LINK_DOWN);
+                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       mvpp2_egress_enable(port);
+                       mvpp2_ingress_enable(port);
+               } else {
+                       mvpp2_ingress_disable(port);
+                       mvpp2_egress_disable(port);
+               }
+               phy_print_status(phydev);
+       }
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info for a descriptor flagged with an Rx error */
+static void mvpp2_rx_error(struct mvpp2_port *port,
+                          struct mvpp2_rx_desc *rx_desc)
+{
+       u32 status = rx_desc->status;
+
+       switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+       case MVPP2_RXD_ERR_CRC:
+               netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       case MVPP2_RXD_ERR_OVERRUN:
+               netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       case MVPP2_RXD_ERR_RESOURCE:
+               netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       }
+}
+
+/* Handle RX checksum offload: mark the skb CHECKSUM_UNNECESSARY when
+ * HW validated an error-free IPv4/IPv6 + TCP/UDP checksum, otherwise
+ * fall back to CHECKSUM_NONE.
+ */
+static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
+                         struct sk_buff *skb)
+{
+       if (((status & MVPP2_RXD_L3_IP4) &&
+            !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+           (status & MVPP2_RXD_L3_IP6))
+               if (((status & MVPP2_RXD_L4_UDP) ||
+                    (status & MVPP2_RXD_L4_TCP)) &&
+                    (status & MVPP2_RXD_L4_CSUM_OK)) {
+                       skb->csum = 0;
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       return;
+               }
+
+       skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Reuse skb if possible, or allocate a new skb and add it to BM pool.
+ * Returns 0 on success (including the "reuse" fast path), -ENOMEM when
+ * a replacement skb cannot be allocated.
+ */
+static int mvpp2_rx_refill(struct mvpp2_port *port,
+                          struct mvpp2_bm_pool *bm_pool,
+                          u32 bm, int is_recycle)
+{
+       struct sk_buff *skb;
+       dma_addr_t phys_addr;
+
+       /* Fast path: the pool still has headroom, keep the buffer in use */
+       if (is_recycle &&
+           (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
+               return 0;
+
+       /* No recycle or too many buffers are in use, so allocate a new skb */
+       skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+       atomic_dec(&bm_pool->in_use);
+       return 0;
+}
+
+/* Handle tx checksum: derive the Tx descriptor CSUM command bits from
+ * the skb's L3/L4 headers, or disable HW checksumming when the skb does
+ * not request it or the protocol is unsupported.
+ */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+{
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int ip_hdr_len = 0;
+               u8 l4_proto;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *ip4h = ip_hdr(skb);
+
+                       /* Calculate IPv4 checksum and L4 checksum */
+                       ip_hdr_len = ip4h->ihl;
+                       l4_proto = ip4h->protocol;
+               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+                       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+                       /* Read l4_protocol from one of IPv6 extra headers */
+                       if (skb_network_header_len(skb) > 0)
+                               ip_hdr_len = (skb_network_header_len(skb) >> 2);
+                       l4_proto = ip6h->nexthdr;
+               } else {
+                       return MVPP2_TXD_L4_CSUM_NOT;
+               }
+
+               return mvpp2_txq_desc_csum(skb_network_offset(skb),
+                               skb->protocol, ip_hdr_len, l4_proto);
+       }
+
+       return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+/* Walk a multi-buffer packet via its buffer-header chain and release
+ * every buffer of the chain back to the BM pool.
+ */
+static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
+                             struct mvpp2_rx_desc *rx_desc)
+{
+       struct mvpp2_buff_hdr *buff_hdr;
+       struct sk_buff *skb;
+       u32 rx_status = rx_desc->status;
+       u32 buff_phys_addr;
+       u32 buff_virt_addr;
+       u32 buff_phys_addr_next;
+       u32 buff_virt_addr_next;
+       int mc_id;
+       int pool_id;
+
+       pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                  MVPP2_RXD_BM_POOL_ID_OFFS;
+       buff_phys_addr = rx_desc->buf_phys_addr;
+       buff_virt_addr = rx_desc->buf_cookie;
+
+       do {
+               skb = (struct sk_buff *)buff_virt_addr;
+               buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
+
+               mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
+
+               /* Grab the next-chain pointers before releasing the buffer */
+               buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
+               buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
+
+               /* Release buffer */
+               mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
+                                    buff_virt_addr, mc_id);
+
+               buff_phys_addr = buff_phys_addr_next;
+               buff_virt_addr = buff_virt_addr_next;
+
+       /* NOTE(review): the loop condition reads buff_hdr->info of the
+        * buffer just returned to the BM pool - a racing refill could
+        * overwrite it; consider caching the "last" flag before the put.
+        */
+       } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
+}
+
+/* Main rx processing: receive up to @rx_todo packets from @rxq, pass
+ * them to the stack through GRO and refill the BM pool for each
+ * consumed buffer.  Returns the number of descriptors processed.
+ */
+static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
+                   struct mvpp2_rx_queue *rxq)
+{
+       struct net_device *dev = port->dev;
+       int rx_received, rx_filled, i;
+       u32 rcvd_pkts = 0;
+       u32 rcvd_bytes = 0;
+
+       /* Get number of received packets and clamp the to-do */
+       rx_received = mvpp2_rxq_received(port, rxq->id);
+       if (rx_todo > rx_received)
+               rx_todo = rx_received;
+
+       /* rx_filled counts buffers returned to the pool (refilled) */
+       rx_filled = 0;
+       for (i = 0; i < rx_todo; i++) {
+               struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+               struct mvpp2_bm_pool *bm_pool;
+               struct sk_buff *skb;
+               u32 bm, rx_status;
+               int pool, rx_bytes, err;
+
+               rx_filled++;
+               rx_status = rx_desc->status;
+               /* Strip the Marvell header from the reported size */
+               rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+
+               bm = mvpp2_bm_cookie_build(rx_desc);
+               pool = mvpp2_bm_cookie_pool_get(bm);
+               bm_pool = &port->priv->bm_pools[pool];
+               /* Check if buffer header is used */
+               if (rx_status & MVPP2_RXD_BUF_HDR) {
+                       mvpp2_buff_hdr_rx(port, rx_desc);
+                       continue;
+               }
+
+               /* In case of an error, release the requested buffer pointer
+                * to the Buffer Manager. This request process is controlled
+                * by the hardware, and the information about the buffer is
+                * comprised by the RX descriptor.
+                */
+               if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+                       dev->stats.rx_errors++;
+                       mvpp2_rx_error(port, rx_desc);
+                       mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+                                         rx_desc->buf_cookie);
+                       continue;
+               }
+
+               skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+               rcvd_pkts++;
+               rcvd_bytes += rx_bytes;
+               atomic_inc(&bm_pool->in_use);
+
+               skb_reserve(skb, MVPP2_MH_SIZE);
+               skb_put(skb, rx_bytes);
+               skb->protocol = eth_type_trans(skb, dev);
+               mvpp2_rx_csum(port, rx_status, skb);
+
+               napi_gro_receive(&port->napi, skb);
+
+               /* Give the pool a fresh buffer for the one just consumed */
+               err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+               if (err) {
+                       netdev_err(port->dev, "failed to refill BM pools\n");
+                       rx_filled--;
+               }
+       }
+
+       if (rcvd_pkts) {
+               struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->rx_packets += rcvd_pkts;
+               stats->rx_bytes   += rcvd_bytes;
+               u64_stats_update_end(&stats->syncp);
+       }
+
+       /* Update Rx queue management counters */
+       wmb();
+       mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+
+       return rx_todo;
+}
+
+/* Unmap a Tx descriptor's DMA buffer and give the descriptor back to @txq */
+static inline void
+tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+                 struct mvpp2_tx_desc *desc)
+{
+       dma_unmap_single(dev, desc->buf_phys_addr,
+                        desc->data_size, DMA_TO_DEVICE);
+       mvpp2_txq_desc_put(txq);
+}
+
+/* Handle tx fragmentation processing: DMA-map every skb fragment and
+ * fill one descriptor per fragment in the aggregated queue, marking the
+ * last one as the last descriptor of the packet.  Returns 0 on success,
+ * -ENOMEM if a DMA mapping fails (descriptors/mappings taken so far are
+ * rolled back).
+ */
+static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
+                                struct mvpp2_tx_queue *aggr_txq,
+                                struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+       struct mvpp2_tx_desc *tx_desc;
+       int i;
+       dma_addr_t buf_phys_addr;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               void *addr = page_address(frag->page.p) + frag->page_offset;
+
+               tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+               tx_desc->phys_txq = txq->id;
+               tx_desc->data_size = frag->size;
+
+               buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
+                                              tx_desc->data_size,
+                                              DMA_TO_DEVICE);
+               if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+                       /* NOTE(review): the descriptor was fetched from
+                        * aggr_txq but is put back to txq - verify this is
+                        * intended.
+                        */
+                       mvpp2_txq_desc_put(txq);
+                       goto error;
+               }
+
+               /* HW wants an aligned address; the low bits go in the offset */
+               tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+               tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+
+               if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+                       /* Last descriptor */
+                       tx_desc->command = MVPP2_TXD_L_DESC;
+                       mvpp2_txq_inc_put(txq_pcpu, skb);
+               } else {
+                       /* Descriptor in the middle: Not First, Not Last */
+                       tx_desc->command = 0;
+                       mvpp2_txq_inc_put(txq_pcpu, NULL);
+               }
+       }
+
+       return 0;
+
+error:
+       /* Release all descriptors that were used to map fragments of
+        * this packet, as well as the corresponding DMA mappings
+        */
+       /* NOTE(review): the rollback indexes txq->descs although the
+        * fragment descriptors above were taken from aggr_txq - confirm
+        * the right descriptors are being unmapped here.
+        */
+       for (i = i - 1; i >= 0; i--) {
+               tx_desc = txq->descs + i;
+               tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+       }
+
+       return -ENOMEM;
+}
+
+/* Main tx processing (ndo_start_xmit): reserve descriptors, map the skb
+ * head (and fragments, if any), kick the aggregated Tx queue and update
+ * per-CPU stats.  On any failure the packet is dropped and freed; the
+ * function always returns NETDEV_TX_OK.
+ */
+static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_tx_queue *txq, *aggr_txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       struct mvpp2_tx_desc *tx_desc;
+       dma_addr_t buf_phys_addr;
+       int frags = 0;
+       u16 txq_id;
+       u32 tx_cmd;
+
+       txq_id = skb_get_queue_mapping(skb);
+       txq = port->txqs[txq_id];
+       txq_pcpu = this_cpu_ptr(txq->pcpu);
+       /* Each CPU owns one aggregated (per-CPU) Tx queue */
+       aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
+
+       /* One descriptor for the head plus one per fragment */
+       frags = skb_shinfo(skb)->nr_frags + 1;
+
+       /* Check number of available descriptors */
+       if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
+           mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
+                                            txq_pcpu, frags)) {
+               frags = 0;
+               goto out;
+       }
+
+       /* Get a descriptor for the first part of the packet */
+       tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+       tx_desc->phys_txq = txq->id;
+       tx_desc->data_size = skb_headlen(skb);
+
+       buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+                                      tx_desc->data_size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+               mvpp2_txq_desc_put(txq);
+               frags = 0;
+               goto out;
+       }
+       /* HW wants an aligned address; the low bits go in the offset */
+       tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+       tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+       tx_cmd = mvpp2_skb_tx_csum(port, skb);
+
+       if (frags == 1) {
+               /* First and Last descriptor */
+               tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+               tx_desc->command = tx_cmd;
+               mvpp2_txq_inc_put(txq_pcpu, skb);
+       } else {
+               /* First but not Last */
+               tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+               tx_desc->command = tx_cmd;
+               mvpp2_txq_inc_put(txq_pcpu, NULL);
+
+               /* Continue with other skb fragments */
+               if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
+                       tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+                       frags = 0;
+                       goto out;
+               }
+       }
+
+       txq_pcpu->reserved_num -= frags;
+       txq_pcpu->count += frags;
+       aggr_txq->count += frags;
+
+       /* Enable transmit */
+       wmb();
+       mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+       /* Stop the queue if it can no longer fit a maximally fragmented skb */
+       if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               netif_tx_stop_queue(nq);
+       }
+out:
+       /* frags == 0 means the packet was not queued: count it as dropped */
+       if (frags > 0) {
+               struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += skb->len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               dev->stats.tx_dropped++;
+               dev_kfree_skb_any(skb);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+/* Log the error conditions flagged in the ISR misc cause bits */
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+       if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+               netdev_err(dev, "FCS error\n");
+       if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+               netdev_err(dev, "rx fifo overrun error\n");
+       if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+               netdev_err(dev, "tx fifo underrun error\n");
+}
+
+/* Per-CPU Tx completion worker (run via on_each_cpu): report misc
+ * errors and reclaim completed descriptors of the Tx queue flagged in
+ * this CPU's Rx/Tx cause register.
+ */
+static void mvpp2_txq_done_percpu(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       u32 cause_rx_tx, cause_tx, cause_misc;
+
+       /* Rx/Tx cause register
+        *
+        * Bits 0-15: each bit indicates received packets on the Rx queue
+        * (bit 0 is for Rx queue 0).
+        *
+        * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+        * (bit 16 is for Tx queue 0).
+        *
+        * Each CPU has its own Rx/Tx cause register
+        */
+       cause_rx_tx = mvpp2_read(port->priv,
+                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+
+       if (cause_misc) {
+               mvpp2_cause_error(port->dev, cause_misc);
+
+               /* Clear the cause register */
+               mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
+               mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+                           cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+       }
+
+       /* Release TX descriptors */
+       if (cause_tx) {
+               struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
+               struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+               if (txq_pcpu->count)
+                       mvpp2_txq_done(port, txq, txq_pcpu);
+       }
+}
+
+/* NAPI poll handler: reap Tx completions on all CPUs, then drain the
+ * pending Rx queues until the budget runs out.  Re-enables interrupts
+ * and completes NAPI only when everything fit in the budget.
+ */
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+       u32 cause_rx_tx, cause_rx;
+       int rx_done = 0;
+       struct mvpp2_port *port = netdev_priv(napi->dev);
+
+       on_each_cpu(mvpp2_txq_done_percpu, port, 1);
+
+       cause_rx_tx = mvpp2_read(port->priv,
+                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+       cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+
+       /* Process RX packets */
+       cause_rx |= port->pending_cause_rx;
+       while (cause_rx && budget > 0) {
+               int count;
+               struct mvpp2_rx_queue *rxq;
+
+               rxq = mvpp2_get_rx_queue(port, cause_rx);
+               if (!rxq)
+                       break;
+
+               count = mvpp2_rx(port, budget, rxq);
+               rx_done += count;
+               budget -= count;
+               if (budget > 0) {
+                       /* Clear the bit associated to this Rx queue
+                        * so that next iteration will continue from
+                        * the next Rx queue.
+                        */
+                       cause_rx &= ~(1 << rxq->logic_rxq);
+               }
+       }
+
+       if (budget > 0) {
+               cause_rx = 0;
+               napi_complete(napi);
+
+               mvpp2_interrupts_enable(port);
+       }
+       /* Remember unfinished queues for the next poll round */
+       port->pending_cause_rx = cause_rx;
+       return rx_done;
+}
+
+/* Set hw internals when starting port: program max frame sizes, enable
+ * NAPI, interrupts and the MAC, then start the PHY and the Tx queues.
+ */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+       mvpp2_gmac_max_rx_size_set(port);
+       mvpp2_txp_max_tx_size_set(port);
+
+       napi_enable(&port->napi);
+
+       /* Enable interrupts on all CPUs */
+       mvpp2_interrupts_enable(port);
+
+       mvpp2_port_enable(port);
+       phy_start(port->phy_dev);
+       netif_tx_start_all_queues(port->dev);
+}
+
+/* Set hw internals when stopping port: block new ingress, quiesce
+ * interrupts/NAPI/queues, then disable the MAC and stop the PHY.
+ */
+static void mvpp2_stop_dev(struct mvpp2_port *port)
+{
+       /* Stop new packets from arriving to RXQs */
+       mvpp2_ingress_disable(port);
+
+       /* NOTE(review): presumably lets in-flight packets drain before
+        * teardown - the 10ms figure is not justified here, confirm.
+        */
+       mdelay(10);
+
+       /* Disable interrupts on all CPUs */
+       mvpp2_interrupts_disable(port);
+
+       napi_disable(&port->napi);
+
+       netif_carrier_off(port->dev);
+       netif_tx_stop_all_queues(port->dev);
+
+       mvpp2_egress_disable(port);
+       mvpp2_port_disable(port);
+       phy_stop(port->phy_dev);
+}
+
+/* Return positive if MTU is valid: values above the hardware limit or
+ * yielding a misaligned packet size are rounded (with a log message);
+ * only mtu < 68 is rejected with -EINVAL.
+ */
+static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
+{
+       if (mtu < 68) {
+               netdev_err(dev, "cannot change mtu to less than 68\n");
+               return -EINVAL;
+       }
+
+       /* 9676 == 9700 - 20 and rounding to 8 */
+       if (mtu > 9676) {
+               netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
+               mtu = 9676;
+       }
+
+       /* The resulting Rx packet size must be a multiple of 8 */
+       if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+               netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+                           ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+               mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+       }
+
+       return mtu;
+}
+
+/* Validate and adjust the requested ring sizes in place: clamp to the
+ * hardware maxima and align Rx to 16 / Tx to 32 descriptors (logging
+ * any adjustment).  Returns 0, or -EINVAL if a requested size is zero.
+ */
+static int mvpp2_check_ringparam_valid(struct net_device *dev,
+                                      struct ethtool_ringparam *ring)
+{
+       u16 new_rx_pending = ring->rx_pending;
+       u16 new_tx_pending = ring->tx_pending;
+
+       if (ring->rx_pending == 0 || ring->tx_pending == 0)
+               return -EINVAL;
+
+       if (ring->rx_pending > MVPP2_MAX_RXD)
+               new_rx_pending = MVPP2_MAX_RXD;
+       else if (!IS_ALIGNED(ring->rx_pending, 16))
+               new_rx_pending = ALIGN(ring->rx_pending, 16);
+
+       if (ring->tx_pending > MVPP2_MAX_TXD)
+               new_tx_pending = MVPP2_MAX_TXD;
+       else if (!IS_ALIGNED(ring->tx_pending, 32))
+               new_tx_pending = ALIGN(ring->tx_pending, 32);
+
+       if (ring->rx_pending != new_rx_pending) {
+               netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
+                           ring->rx_pending, new_rx_pending);
+               ring->rx_pending = new_rx_pending;
+       }
+
+       if (ring->tx_pending != new_tx_pending) {
+               netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
+                           ring->tx_pending, new_tx_pending);
+               ring->tx_pending = new_tx_pending;
+       }
+
+       return 0;
+}
+
+/* Read the port's MAC address out of the GMAC/LMS registers into @addr
+ * (high/middle/low register bytes assembled most-significant first).
+ */
+static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+{
+       u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
+       mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+       mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+       mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
+       addr[0] = (mac_addr_h >> 24) & 0xFF;
+       addr[1] = (mac_addr_h >> 16) & 0xFF;
+       addr[2] = (mac_addr_h >> 8) & 0xFF;
+       addr[3] = mac_addr_h & 0xFF;
+       addr[4] = mac_addr_m & 0xFF;
+       addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+/* Attach the PHY described in the device tree and restrict it to
+ * gigabit features.  Returns 0 on success, -ENODEV on failure.
+ */
+static int mvpp2_phy_connect(struct mvpp2_port *port)
+{
+       struct phy_device *phy_dev;
+
+       phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
+                                port->phy_interface);
+       if (!phy_dev) {
+               netdev_err(port->dev, "cannot connect to phy\n");
+               return -ENODEV;
+       }
+       phy_dev->supported &= PHY_GBIT_FEATURES;
+       phy_dev->advertising = phy_dev->supported;
+
+       port->phy_dev = phy_dev;
+       /* Link state is resolved later by mvpp2_link_event */
+       port->link    = 0;
+       port->duplex  = 0;
+       port->speed   = 0;
+
+       return 0;
+}
+
+/* Detach the PHY and clear the cached handle */
+static void mvpp2_phy_disconnect(struct mvpp2_port *port)
+{
+       phy_disconnect(port->phy_dev);
+       port->phy_dev = NULL;
+}
+
+/* ndo_open: install parser entries (broadcast + own MAC, tag mode,
+ * default flow), allocate the Rx/Tx queues, request the IRQ, connect
+ * the PHY and start the port.  Resources are unwound on failure.
+ */
+static int mvpp2_open(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       unsigned char mac_bcast[ETH_ALEN] = {
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+       int err;
+
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+               return err;
+       }
+       /* NOTE(review): this accepts the device's own (unicast) address,
+        * yet the failure message below says "MC" - looks misleading.
+        */
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id,
+                                     dev->dev_addr, true);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
+               return err;
+       }
+       err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+               return err;
+       }
+       err = mvpp2_prs_def_flow(port);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+               return err;
+       }
+
+       /* Allocate the Rx/Tx queues */
+       err = mvpp2_setup_rxqs(port);
+       if (err) {
+               netdev_err(port->dev, "cannot allocate Rx queues\n");
+               return err;
+       }
+
+       err = mvpp2_setup_txqs(port);
+       if (err) {
+               netdev_err(port->dev, "cannot allocate Tx queues\n");
+               goto err_cleanup_rxqs;
+       }
+
+       err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
+       if (err) {
+               netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
+               goto err_cleanup_txqs;
+       }
+
+       /* In default link is down */
+       netif_carrier_off(port->dev);
+
+       err = mvpp2_phy_connect(port);
+       if (err < 0)
+               goto err_free_irq;
+
+       /* Unmask interrupts on all CPUs */
+       on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+
+       mvpp2_start_dev(port);
+
+       return 0;
+
+err_free_irq:
+       free_irq(port->irq, port);
+err_cleanup_txqs:
+       mvpp2_cleanup_txqs(port);
+err_cleanup_rxqs:
+       mvpp2_cleanup_rxqs(port);
+       return err;
+}
+
+/* ndo_stop: stop the port, detach the PHY, mask interrupts and release
+ * the IRQ and the Rx/Tx queues (reverse of mvpp2_open).
+ */
+static int mvpp2_stop(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       mvpp2_stop_dev(port);
+       mvpp2_phy_disconnect(port);
+
+       /* Mask interrupts on all CPUs */
+       on_each_cpu(mvpp2_interrupts_mask, port, 1);
+
+       free_irq(port->irq, port);
+       mvpp2_cleanup_rxqs(port);
+       mvpp2_cleanup_txqs(port);
+
+       return 0;
+}
+
+/* ndo_set_rx_mode: program the parser for the promiscuous/allmulti
+ * flags and rebuild the port's multicast entries from scratch.
+ */
+static void mvpp2_set_rx_mode(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2 *priv = port->priv;
+       struct netdev_hw_addr *ha;
+       int id = port->id;
+       bool allmulti = dev->flags & IFF_ALLMULTI;
+
+       mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
+       mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
+       mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
+
+       /* Remove all port->id's mcast entries */
+       mvpp2_prs_mcast_del_all(priv, id);
+
+       if (allmulti && !netdev_mc_empty(dev)) {
+               netdev_for_each_mc_addr(ha, dev)
+                       mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+       }
+}
+
+/* ndo_set_mac_address: update the parser with the new address.  If the
+ * interface is running the port is stopped first and restarted after;
+ * on failure the original address is restored in the parser.
+ */
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       const struct sockaddr *addr = p;
+       int err;
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               err = -EADDRNOTAVAIL;
+               goto error;
+       }
+
+       /* Interface down: no need to stop/start the port around the change */
+       if (!netif_running(dev)) {
+               err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+               if (!err)
+                       return 0;
+               /* Reconfigure parser to accept the original MAC address */
+               err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+               if (err)
+                       goto error;
+       }
+
+       mvpp2_stop_dev(port);
+
+       err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+       if (!err)
+               goto out_start;
+
+       /* Reconfigure parser accept the original MAC address */
+       err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+       if (err)
+               goto error;
+out_start:
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+       return 0;
+
+error:
+       netdev_err(dev, "fail to change MAC address\n");
+       return err;
+}
+
+/* ndo_change_mtu: validate (and possibly round) the requested MTU, then
+ * resize the BM pools for the new packet size.  If the interface is
+ * running the port is stopped/restarted around the change; on failure
+ * the BM pools are reconfigured back to the original MTU.
+ */
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int err;
+
+       mtu = mvpp2_check_mtu_valid(dev, mtu);
+       if (mtu < 0) {
+               err = mtu;
+               goto error;
+       }
+
+       /* Interface down: no need to stop/start the port around the change */
+       if (!netif_running(dev)) {
+               err = mvpp2_bm_update_mtu(dev, mtu);
+               if (!err) {
+                       port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
+                       return 0;
+               }
+
+               /* Reconfigure BM to the original MTU */
+               err = mvpp2_bm_update_mtu(dev, dev->mtu);
+               if (err)
+                       goto error;
+       }
+
+       mvpp2_stop_dev(port);
+
+       err = mvpp2_bm_update_mtu(dev, mtu);
+       if (!err) {
+               port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
+               goto out_start;
+       }
+
+       /* Reconfigure BM to the original MTU */
+       err = mvpp2_bm_update_mtu(dev, dev->mtu);
+       if (err)
+               goto error;
+
+out_start:
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+
+       return 0;
+
+error:
+       netdev_err(dev, "fail to change MTU\n");
+       return err;
+}
+
+/* ndo_get_stats64: sum the per-CPU packet/byte counters (read under the
+ * u64_stats seqcount so 64-bit values are consistent on 32-bit hosts)
+ * and copy the error/drop counters kept in dev->stats.
+ */
+static struct rtnl_link_stats64 *
+mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       unsigned int start;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct mvpp2_pcpu_stats *cpu_stats;
+               u64 rx_packets;
+               u64 rx_bytes;
+               u64 tx_packets;
+               u64 tx_bytes;
+
+               cpu_stats = per_cpu_ptr(port->stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       rx_packets = cpu_stats->rx_packets;
+                       rx_bytes   = cpu_stats->rx_bytes;
+                       tx_packets = cpu_stats->tx_packets;
+                       tx_bytes   = cpu_stats->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+               stats->rx_packets += rx_packets;
+               stats->rx_bytes   += rx_bytes;
+               stats->tx_packets += tx_packets;
+               stats->tx_bytes   += tx_bytes;
+       }
+
+       stats->rx_errors        = dev->stats.rx_errors;
+       stats->rx_dropped       = dev->stats.rx_dropped;
+       stats->tx_dropped       = dev->stats.tx_dropped;
+
+       return stats;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtools; delegates to the
+ * attached PHY, -ENODEV when no PHY is connected (interface down).
+ */
+static int mvpp2_ethtool_get_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!port->phy_dev)
+               return -ENODEV;
+       return phy_ethtool_gset(port->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtools; delegates to the
+ * attached PHY, -ENODEV when no PHY is connected (interface down).
+ */
+static int mvpp2_ethtool_set_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!port->phy_dev)
+               return -ENODEV;
+       return phy_ethtool_sset(port->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtools: the same Rx usec/frame values
+ * are applied to every Rx queue, the Tx-done frame threshold to every
+ * Tx queue (programmed per-CPU via on_each_cpu).
+ */
+static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
+                                     struct ethtool_coalesce *c)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+               rxq->time_coal = c->rx_coalesce_usecs;
+               rxq->pkts_coal = c->rx_max_coalesced_frames;
+               mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+               mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+       }
+
+       for (queue = 0; queue < txq_number; queue++) {
+               struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+               txq->done_pkts_coal = c->tx_max_coalesced_frames;
+       }
+
+       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
+       return 0;
+}
+
+/* get coalescing for ethtools; queue 0 is reported since set_coalesce
+ * programs all queues with the same values.
+ */
+static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
+                                     struct ethtool_coalesce *c)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
+       c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
+       c->tx_max_coalesced_frames =  port->txqs[0]->done_pkts_coal;
+       return 0;
+}
+
+/* Fill the ethtool driver-info strings (name, version, bus id) */
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+               sizeof(drvinfo->bus_info));
+}
+
+/* Report the current and maximum Rx/Tx ring sizes to ethtool */
+static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
+                                       struct ethtool_ringparam *ring)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       ring->rx_max_pending = MVPP2_MAX_RXD;
+       ring->tx_max_pending = MVPP2_MAX_TXD;
+       ring->rx_pending = port->rx_ring_size;
+       ring->tx_pending = port->tx_ring_size;
+}
+
+/* Apply new Rx/Tx ring sizes.  If the interface is down only the sizes
+ * are recorded; otherwise the port is stopped, the queues reallocated
+ * with the new sizes (falling back to the previous sizes on failure)
+ * and the port restarted.
+ */
+static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
+                                      struct ethtool_ringparam *ring)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       u16 prev_rx_ring_size = port->rx_ring_size;
+       u16 prev_tx_ring_size = port->tx_ring_size;
+       int err;
+
+       err = mvpp2_check_ringparam_valid(dev, ring);
+       if (err)
+               return err;
+
+       if (!netif_running(dev)) {
+               port->rx_ring_size = ring->rx_pending;
+               port->tx_ring_size = ring->tx_pending;
+               return 0;
+       }
+
+       /* The interface is running, so we have to force a
+        * reallocation of the queues
+        */
+       mvpp2_stop_dev(port);
+       mvpp2_cleanup_rxqs(port);
+       mvpp2_cleanup_txqs(port);
+
+       port->rx_ring_size = ring->rx_pending;
+       port->tx_ring_size = ring->tx_pending;
+
+       err = mvpp2_setup_rxqs(port);
+       if (err) {
+               /* Reallocate Rx queues with the original ring size */
+               port->rx_ring_size = prev_rx_ring_size;
+               ring->rx_pending = prev_rx_ring_size;
+               err = mvpp2_setup_rxqs(port);
+               if (err)
+                       goto err_out;
+       }
+       err = mvpp2_setup_txqs(port);
+       if (err) {
+               /* Reallocate Tx queues with the original ring size */
+               port->tx_ring_size = prev_tx_ring_size;
+               ring->tx_pending = prev_tx_ring_size;
+               err = mvpp2_setup_txqs(port);
+               if (err)
+                       goto err_clean_rxqs;
+       }
+
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+
+       return 0;
+
+err_clean_rxqs:
+       mvpp2_cleanup_rxqs(port);
+err_out:
+       netdev_err(dev, "fail to change ring parameters");
+       return err;
+}
+
+/* Device ops */
+
+/* net_device callbacks implemented by this driver */
+static const struct net_device_ops mvpp2_netdev_ops = {
+       .ndo_open               = mvpp2_open,
+       .ndo_stop               = mvpp2_stop,
+       .ndo_start_xmit         = mvpp2_tx,
+       .ndo_set_rx_mode        = mvpp2_set_rx_mode,
+       .ndo_set_mac_address    = mvpp2_set_mac_address,
+       .ndo_change_mtu         = mvpp2_change_mtu,
+       .ndo_get_stats64        = mvpp2_get_stats64,
+};
+
+/* ethtool callbacks implemented by this driver */
+static const struct ethtool_ops mvpp2_eth_tool_ops = {
+       .get_link       = ethtool_op_get_link,
+       .get_settings   = mvpp2_ethtool_get_settings,
+       .set_settings   = mvpp2_ethtool_set_settings,
+       .set_coalesce   = mvpp2_ethtool_set_coalesce,
+       .get_coalesce   = mvpp2_ethtool_get_coalesce,
+       .get_drvinfo    = mvpp2_ethtool_get_drvinfo,
+       .get_ringparam  = mvpp2_ethtool_get_ringparam,
+       .set_ringparam  = mvpp2_ethtool_set_ringparam,
+};
+
+/* Driver initialization */
+
+/* Basic MAC bring-up: set the MII mode, disable periodic xon
+ * generation and reset the port.
+ */
+static void mvpp2_port_power_up(struct mvpp2_port *port)
+{
+       mvpp2_port_mii_set(port);
+       mvpp2_port_periodic_xon_disable(port);
+       mvpp2_port_reset(port);
+}
+
+/* Initialize port HW: allocate and wire up the per-port Tx/Rx queue
+ * descriptors, apply default/classifier configuration and create the
+ * software-forwarding BM pools.  Returns 0 or a negative errno; queue
+ * structures are devm-allocated, only the per-CPU Tx state needs
+ * explicit freeing on error.
+ */
+static int mvpp2_port_init(struct mvpp2_port *port)
+{
+       struct device *dev = port->dev->dev.parent;
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int queue, cpu, err;
+
+       /* The port's Rx queue range must fit in the global Rx queue space */
+       if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+               return -EINVAL;
+
+       /* Disable port */
+       mvpp2_egress_disable(port);
+       mvpp2_port_disable(port);
+
+       port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
+                                 GFP_KERNEL);
+       if (!port->txqs)
+               return -ENOMEM;
+
+       /* Associate physical Tx queues to this port and initialize.
+        * The mapping is predefined.
+        */
+       for (queue = 0; queue < txq_number; queue++) {
+               int queue_phy_id = mvpp2_txq_phys(port->id, queue);
+               struct mvpp2_tx_queue *txq;
+
+               txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+               if (!txq)
+                       return -ENOMEM;
+
+               txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+               if (!txq->pcpu) {
+                       err = -ENOMEM;
+                       goto err_free_percpu;
+               }
+
+               txq->id = queue_phy_id;
+               txq->log_id = queue;
+               txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+               for_each_present_cpu(cpu) {
+                       txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+                       txq_pcpu->cpu = cpu;
+               }
+
+               port->txqs[queue] = txq;
+       }
+
+       port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
+                                 GFP_KERNEL);
+       if (!port->rxqs) {
+               err = -ENOMEM;
+               goto err_free_percpu;
+       }
+
+       /* Allocate and initialize Rx queue for this port */
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq;
+
+               /* Map physical Rx queue to port's logical Rx queue */
+               rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+               if (!rxq)
+                       goto err_free_percpu;
+               /* Map this Rx queue to a physical queue */
+               rxq->id = port->first_rxq + queue;
+               rxq->port = port->id;
+               rxq->logic_rxq = queue;
+
+               port->rxqs[queue] = rxq;
+       }
+
+       /* Configure Rx queue group interrupt for this port */
+       mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+
+       /* Create Rx descriptor rings */
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+               rxq->size = port->rx_ring_size;
+               rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+               rxq->time_coal = MVPP2_RX_COAL_USEC;
+       }
+
+       mvpp2_ingress_disable(port);
+
+       /* Port default configuration */
+       mvpp2_defaults_set(port);
+
+       /* Port's classifier configuration */
+       mvpp2_cls_oversize_rxq_set(port);
+       mvpp2_cls_port_config(port);
+
+       /* Provide an initial Rx packet size */
+       port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
+
+       /* Initialize pools for swf */
+       err = mvpp2_swf_bm_pool_init(port);
+       if (err)
+               goto err_free_percpu;
+
+       return 0;
+
+err_free_percpu:
+       /* Only the per-CPU state is freed here; devm handles the rest */
+       for (queue = 0; queue < txq_number; queue++) {
+               if (!port->txqs[queue])
+                       continue;
+               free_percpu(port->txqs[queue]->pcpu);
+       }
+       return err;
+}
+
+/* Ports initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+                           struct device_node *port_node,
+                           struct mvpp2 *priv,
+                           int *next_first_rxq)
+{
+       struct device_node *phy_node;
+       struct mvpp2_port *port;
+       struct net_device *dev;
+       struct resource *res;
+       const char *dt_mac_addr;
+       const char *mac_from;
+       char hw_mac_addr[ETH_ALEN];
+       u32 id;
+       int features;
+       int phy_mode;
+       int priv_common_regs_num = 2;
+       int err, i;
+
+       dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
+                                rxq_number);
+       if (!dev)
+               return -ENOMEM;
+
+       phy_node = of_parse_phandle(port_node, "phy", 0);
+       if (!phy_node) {
+               dev_err(&pdev->dev, "missing phy\n");
+               err = -ENODEV;
+               goto err_free_netdev;
+       }
+
+       phy_mode = of_get_phy_mode(port_node);
+       if (phy_mode < 0) {
+               dev_err(&pdev->dev, "incorrect phy mode\n");
+               err = phy_mode;
+               goto err_free_netdev;
+       }
+
+       if (of_property_read_u32(port_node, "port-id", &id)) {
+               err = -EINVAL;
+               dev_err(&pdev->dev, "missing port-id value\n");
+               goto err_free_netdev;
+       }
+
+       dev->tx_queue_len = MVPP2_MAX_TXD;
+       dev->watchdog_timeo = 5 * HZ;
+       dev->netdev_ops = &mvpp2_netdev_ops;
+       dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+       port = netdev_priv(dev);
+
+       port->irq = irq_of_parse_and_map(port_node, 0);
+       if (port->irq <= 0) {
+               err = -EINVAL;
+               goto err_free_netdev;
+       }
+
+       if (of_property_read_bool(port_node, "marvell,loopback"))
+               port->flags |= MVPP2_F_LOOPBACK;
+
+       port->priv = priv;
+       port->id = id;
+       port->first_rxq = *next_first_rxq;
+       port->phy_node = phy_node;
+       port->phy_interface = phy_mode;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   priv_common_regs_num + id);
+       port->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(port->base)) {
+               err = PTR_ERR(port->base);
+               goto err_free_irq;
+       }
+
+       /* Alloc per-cpu stats */
+       port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
+       if (!port->stats) {
+               err = -ENOMEM;
+               goto err_free_irq;
+       }
+
+       dt_mac_addr = of_get_mac_address(port_node);
+       if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+               mac_from = "device tree";
+               ether_addr_copy(dev->dev_addr, dt_mac_addr);
+       } else {
+               mvpp2_get_mac_address(port, hw_mac_addr);
+               if (is_valid_ether_addr(hw_mac_addr)) {
+                       mac_from = "hardware";
+                       ether_addr_copy(dev->dev_addr, hw_mac_addr);
+               } else {
+                       mac_from = "random";
+                       eth_hw_addr_random(dev);
+               }
+       }
+
+       port->tx_ring_size = MVPP2_MAX_TXD;
+       port->rx_ring_size = MVPP2_MAX_RXD;
+       port->dev = dev;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = mvpp2_port_init(port);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to init port %d\n", id);
+               goto err_free_stats;
+       }
+       mvpp2_port_power_up(port);
+
+       netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
+       features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = features | NETIF_F_RXCSUM;
+       dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+       dev->vlan_features |= features;
+
+       err = register_netdev(dev);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register netdev\n");
+               goto err_free_txq_pcpu;
+       }
+       netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+       /* Increment the first Rx queue number to be used by the next port */
+       *next_first_rxq += rxq_number;
+       priv->port_list[id] = port;
+       return 0;
+
+err_free_txq_pcpu:
+       for (i = 0; i < txq_number; i++)
+               free_percpu(port->txqs[i]->pcpu);
+err_free_stats:
+       free_percpu(port->stats);
+err_free_irq:
+       irq_dispose_mapping(port->irq);
+err_free_netdev:
+       free_netdev(dev);
+       return err;
+}
+
+/* Ports removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *port)
+{
+       int i;
+
+       unregister_netdev(port->dev);
+       free_percpu(port->stats);
+       for (i = 0; i < txq_number; i++)
+               free_percpu(port->txqs[i]->pcpu);
+       irq_dispose_mapping(port->irq);
+       free_netdev(port->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+                                   struct mvpp2 *priv)
+{
+       u32 win_enable;
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
+               mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
+
+               if (i < 4)
+                       mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
+       }
+
+       win_enable = 0;
+
+       for (i = 0; i < dram->num_cs; i++) {
+               const struct mbus_dram_window *cs = dram->cs + i;
+
+               mvpp2_write(priv, MVPP2_WIN_BASE(i),
+                           (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+                           dram->mbus_dram_target_id);
+
+               mvpp2_write(priv, MVPP2_WIN_SIZE(i),
+                           (cs->size - 1) & 0xffff0000);
+
+               win_enable |= (1 << i);
+       }
+
+       mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFOs */
+static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
+{
+       int port;
+
+       for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+               mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+                           MVPP2_RX_FIFO_PORT_DATA_SIZE);
+               mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+                           MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+       }
+
+       mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+                   MVPP2_RX_FIFO_PORT_MIN_PKT);
+       mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       const struct mbus_dram_target_info *dram_target_info;
+       int err, i;
+
+       /* Checks for hardware constraints */
+       if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+           (txq_number > MVPP2_MAX_TXQ)) {
+               dev_err(&pdev->dev, "invalid queue size parameter\n");
+               return -EINVAL;
+       }
+
+       /* MBUS windows configuration */
+       dram_target_info = mv_mbus_dram_info();
+       if (dram_target_info)
+               mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+       /* Allocate and initialize aggregated TXQs */
+       priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+                                      sizeof(struct mvpp2_tx_queue),
+                                      GFP_KERNEL);
+       if (!priv->aggr_txqs)
+               return -ENOMEM;
+
+       for_each_present_cpu(i) {
+               priv->aggr_txqs[i].id = i;
+               priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+               err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
+                                         MVPP2_AGGR_TXQ_SIZE, i, priv);
+               if (err < 0)
+                       return err;
+       }
+
+       /* Rx Fifo Init */
+       mvpp2_rx_fifo_init(priv);
+
+       /* Reset Rx queue group interrupt configuration */
+       for (i = 0; i < MVPP2_MAX_PORTS; i++)
+               mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+
+       writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+              priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+       /* Allow cache snoop when transmitting packets */
+       mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+       /* Buffer Manager initialization */
+       err = mvpp2_bm_init(pdev, priv);
+       if (err < 0)
+               return err;
+
+       /* Parser default initialization */
+       err = mvpp2_prs_default_init(pdev, priv);
+       if (err < 0)
+               return err;
+
+       /* Classifier default initialization */
+       mvpp2_cls_init(priv);
+
+       return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+       struct device_node *dn = pdev->dev.of_node;
+       struct device_node *port_node;
+       struct mvpp2 *priv;
+       struct resource *res;
+       int port_count, first_rxq;
+       int err;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->lms_base))
+               return PTR_ERR(priv->lms_base);
+
+       priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+       if (IS_ERR(priv->pp_clk))
+               return PTR_ERR(priv->pp_clk);
+       err = clk_prepare_enable(priv->pp_clk);
+       if (err < 0)
+               return err;
+
+       priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+       if (IS_ERR(priv->gop_clk)) {
+               err = PTR_ERR(priv->gop_clk);
+               goto err_pp_clk;
+       }
+       err = clk_prepare_enable(priv->gop_clk);
+       if (err < 0)
+               goto err_pp_clk;
+
+       /* Get system's tclk rate */
+       priv->tclk = clk_get_rate(priv->pp_clk);
+
+       /* Initialize network controller */
+       err = mvpp2_init(pdev, priv);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to initialize controller\n");
+               goto err_gop_clk;
+       }
+
+       port_count = of_get_available_child_count(dn);
+       if (port_count == 0) {
+               dev_err(&pdev->dev, "no ports enabled\n");
+               err = -ENODEV;
+               goto err_gop_clk;
+       }
+
+       priv->port_list = devm_kcalloc(&pdev->dev, port_count,
+                                     sizeof(struct mvpp2_port *),
+                                     GFP_KERNEL);
+       if (!priv->port_list) {
+               err = -ENOMEM;
+               goto err_gop_clk;
+       }
+
+       /* Initialize ports */
+       first_rxq = 0;
+       for_each_available_child_of_node(dn, port_node) {
+               err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+               if (err < 0)
+                       goto err_gop_clk;
+       }
+
+       platform_set_drvdata(pdev, priv);
+       return 0;
+
+err_gop_clk:
+       clk_disable_unprepare(priv->gop_clk);
+err_pp_clk:
+       clk_disable_unprepare(priv->pp_clk);
+       return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+       struct mvpp2 *priv = platform_get_drvdata(pdev);
+       struct device_node *dn = pdev->dev.of_node;
+       struct device_node *port_node;
+       int i = 0;
+
+       for_each_available_child_of_node(dn, port_node) {
+               if (priv->port_list[i])
+                       mvpp2_port_remove(priv->port_list[i]);
+               i++;
+       }
+
+       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+               struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+               mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+       }
+
+       for_each_present_cpu(i) {
+               struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
+
+               dma_free_coherent(&pdev->dev,
+                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 aggr_txq->descs,
+                                 aggr_txq->descs_phys);
+       }
+
+       clk_disable_unprepare(priv->pp_clk);
+       clk_disable_unprepare(priv->gop_clk);
+
+       return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+       { .compatible = "marvell,armada-375-pp2" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+static struct platform_driver mvpp2_driver = {
+       .probe = mvpp2_probe,
+       .remove = mvpp2_remove,
+       .driver = {
+               .name = MVPP2_DRIVER_NAME,
+               .of_match_table = mvpp2_match,
+       },
+};
+
+module_platform_driver(mvpp2_driver);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
index 7345c43b019e52e9e45ab3f05b769b618f17f468..887cf01d831d7c1937cd7dee11aa0655f5541ffc 100644 (file)
@@ -760,21 +760,22 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
 }
 
-static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
+static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
+                             unsigned char new_mac[ETH_ALEN + 2])
 {
        int err = 0;
 
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_en_replace_mac(priv, priv->base_qpn,
-                                         priv->dev->dev_addr, priv->prev_mac);
+                                         new_mac, priv->current_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
-       memcpy(priv->prev_mac, priv->dev->dev_addr,
-              sizeof(priv->prev_mac));
+       if (!err)
+               memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
 
        return err;
 }
@@ -784,14 +785,17 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;
+       unsigned char new_mac[ETH_ALEN + 2];
        int err;
 
        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;
 
        mutex_lock(&mdev->state_lock);
-       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-       err = mlx4_en_do_set_mac(priv);
+       memcpy(new_mac, saddr->sa_data, ETH_ALEN);
+       err = mlx4_en_do_set_mac(priv, new_mac);
+       if (!err)
+               memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        mutex_unlock(&mdev->state_lock);
 
        return err;
@@ -940,11 +944,6 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
-
-               /* Disable port VLAN filter */
-               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
-                       en_err(priv, "Failed disabling VLAN filter\n");
        }
 }
 
@@ -993,11 +992,6 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                        en_err(priv, "Failed disabling promiscuous mode\n");
                break;
        }
-
-       /* Enable port VLAN filter */
-       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-       if (err)
-               en_err(priv, "Failed enabling VLAN filter\n");
 }
 
 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
@@ -1166,7 +1160,8 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                        }
 
                        /* MAC address of the port is not in uc list */
-                       if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+                       if (ether_addr_equal_64bits(entry->mac,
+                                                   priv->current_mac))
                                found = true;
 
                        if (!found) {
@@ -1476,7 +1471,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
-               mlx4_en_do_set_mac(priv);
+               mlx4_en_do_set_mac(priv, priv->current_mac);
                mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
        }
        mutex_unlock(&mdev->state_lock);
@@ -2535,7 +2530,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                }
        }
 
-       memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+       memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
 
        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
index 5535862f27cc57c0dbb0c9b972e020477e858c6a..7765a08f9e841cd30587e404a1c7f58b94b88fdc 100644 (file)
@@ -933,7 +933,7 @@ static const int frag_sizes[] = {
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
        int buf_size = 0;
        int i = 0;
 
index 03e5f6ac67e7660dbc68c6fe69e5123247588bf0..49d5afc7cfb84cfcd413b73d6e7ff9b938802256 100644 (file)
@@ -159,7 +159,8 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
                if (priv->mdev->dev->caps.flags &
                                        MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
                        buf[3] = mlx4_en_test_registers(priv);
-                       buf[4] = mlx4_en_test_loopback(priv);
+                       if (priv->port_up)
+                               buf[4] = mlx4_en_test_loopback(priv);
                }
 
                if (carrier_ok)
index 4c36def8e10f9b518a1ba8a3f05340eb63c4dc0a..d80e7a6fac74c4381cea11f946c889216a5ce05d 100644 (file)
@@ -270,7 +270,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == pqp->qpn)
+               if (qpn == dqp->qpn)
                        return 0; /* qp is already duplicated */
        }
 
@@ -324,24 +324,22 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
        return true;
 }
 
-/* I a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
-                                     enum mlx4_steer_type steer,
-                                     unsigned int index, u32 tqpn)
+/* Returns true if all the QPs != tqpn contained in this entry
+ * are Promisc QPs. Returns false otherwise.
+ */
+static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
+                                  enum mlx4_steer_type steer,
+                                  unsigned int index, u32 tqpn,
+                                  u32 *members_count)
 {
-       struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry = NULL, *tmp_entry;
-       u32 qpn;
-       u32 members_count;
+       u32 m_count;
        bool ret = false;
        int i;
 
        if (port < 1 || port > dev->caps.num_ports)
-               return NULL;
-
-       s_steer = &mlx4_priv(dev)->steer[port - 1];
+               return false;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -350,21 +348,61 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
 
        if (mlx4_READ_ENTRY(dev, index, mailbox))
                goto out;
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (i = 0;  i < members_count; i++) {
-               qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
+       m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       if (members_count)
+               *members_count = m_count;
+
+       for (i = 0;  i < m_count; i++) {
+               u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
                if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
        }
-        /* All the qps currently registered for this entry are promiscuous,
+       ret = true;
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return ret;
+}
+
+/* If a steering entry contains only promisc QPs, it can be removed. */
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
+                                     enum mlx4_steer_type steer,
+                                     unsigned int index, u32 tqpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_steer_index *entry = NULL, *tmp_entry;
+       u32 members_count;
+       bool ret = false;
+
+       if (port < 1 || port > dev->caps.num_ports)
+               return NULL;
+
+       s_steer = &mlx4_priv(dev)->steer[port - 1];
+
+       if (!promisc_steering_entry(dev, port, steer, index,
+                                   tqpn, &members_count))
+               goto out;
+
+       /* All the qps currently registered for this entry are promiscuous,
          * Checking for duplicates */
        ret = true;
        list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
                if (entry->index == index) {
-                       if (list_empty(&entry->duplicates)) {
+                       if (list_empty(&entry->duplicates) ||
+                           members_count == 1) {
+                               struct mlx4_promisc_qp *pqp, *tmp_pqp;
+                               /* If there is only 1 entry in duplicates then
+                                * this is the QP we want to delete, going over
+                                * the list and deleting the entry.
+                                */
                                list_del(&entry->list);
+                               list_for_each_entry_safe(pqp, tmp_pqp,
+                                                        &entry->duplicates,
+                                                        list) {
+                                       list_del(&pqp->list);
+                                       kfree(pqp);
+                               }
                                kfree(entry);
                        } else {
                                /* This entry contains duplicates so it shouldn't be removed */
@@ -375,7 +413,6 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
        }
 
 out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
 
@@ -421,42 +458,57 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
        }
        mgm = mailbox->buf;
 
-       /* the promisc qp needs to be added for each one of the steering
-        * entries, if it already exists, needs to be added as a duplicate
-        * for this entry */
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-               if (err)
-                       goto out_mailbox;
+       if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
+               /* The promisc QP needs to be added for each one of the steering
+                * entries. If it already exists, needs to be added as
+                * a duplicate for this entry.
+                */
+               list_for_each_entry(entry,
+                                   &s_steer->steer_entries[steer],
+                                   list) {
+                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+                       if (err)
+                               goto out_mailbox;
 
-               members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-               prot = be32_to_cpu(mgm->members_count) >> 30;
-               found = false;
-               for (i = 0; i < members_count; i++) {
-                       if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
-                               /* Entry already exists, add to duplicates */
-                               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-                               if (!dqp) {
+                       members_count = be32_to_cpu(mgm->members_count) &
+                                       0xffffff;
+                       prot = be32_to_cpu(mgm->members_count) >> 30;
+                       found = false;
+                       for (i = 0; i < members_count; i++) {
+                               if ((be32_to_cpu(mgm->qp[i]) &
+                                    MGM_QPN_MASK) == qpn) {
+                                       /* Entry already exists.
+                                        * Add to duplicates.
+                                        */
+                                       dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
+                                       if (!dqp) {
+                                               err = -ENOMEM;
+                                               goto out_mailbox;
+                                       }
+                                       dqp->qpn = qpn;
+                                       list_add_tail(&dqp->list,
+                                                     &entry->duplicates);
+                                       found = true;
+                               }
+                       }
+                       if (!found) {
+                               /* Need to add the qpn to mgm */
+                               if (members_count ==
+                                   dev->caps.num_qp_per_mgm) {
+                                       /* entry is full */
                                        err = -ENOMEM;
                                        goto out_mailbox;
                                }
-                               dqp->qpn = qpn;
-                               list_add_tail(&dqp->list, &entry->duplicates);
-                               found = true;
-                       }
-               }
-               if (!found) {
-                       /* Need to add the qpn to mgm */
-                       if (members_count == dev->caps.num_qp_per_mgm) {
-                               /* entry is full */
-                               err = -ENOMEM;
-                               goto out_mailbox;
+                               mgm->qp[members_count++] =
+                                       cpu_to_be32(qpn & MGM_QPN_MASK);
+                               mgm->members_count =
+                                       cpu_to_be32(members_count |
+                                                   (prot << 30));
+                               err = mlx4_WRITE_ENTRY(dev, entry->index,
+                                                      mailbox);
+                               if (err)
+                                       goto out_mailbox;
                        }
-                       mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
-                       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                       if (err)
-                               goto out_mailbox;
                }
        }
 
@@ -465,8 +517,14 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
        /* now need to add all the promisc qps to default entry */
        memset(mgm, 0, sizeof *mgm);
        members_count = 0;
-       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
+               if (members_count == dev->caps.num_qp_per_mgm) {
+                       /* entry is full */
+                       err = -ENOMEM;
+                       goto out_list;
+               }
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+       }
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
@@ -495,13 +553,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry;
+       struct mlx4_steer_index *entry, *tmp_entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        bool found;
        bool back_to_list = false;
-       int loc, i;
+       int i;
        int err;
 
        if (port < 1 || port > dev->caps.num_ports)
@@ -538,39 +596,73 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
        if (err)
                goto out_mailbox;
 
-       /* remove the qp from all the steering entries*/
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               found = false;
-               list_for_each_entry(dqp, &entry->duplicates, list) {
-                       if (dqp->qpn == qpn) {
-                               found = true;
-                               break;
+       if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
+               /* Remove the QP from all the steering entries */
+               list_for_each_entry_safe(entry, tmp_entry,
+                                        &s_steer->steer_entries[steer],
+                                        list) {
+                       found = false;
+                       list_for_each_entry(dqp, &entry->duplicates, list) {
+                               if (dqp->qpn == qpn) {
+                                       found = true;
+                                       break;
+                               }
                        }
-               }
-               if (found) {
-                       /* a duplicate, no need to change the mgm,
-                        * only update the duplicates list */
-                       list_del(&dqp->list);
-                       kfree(dqp);
-               } else {
-                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-                               if (err)
-                                       goto out_mailbox;
-                       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-                       for (loc = -1, i = 0; i < members_count; ++i)
-                               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
-                                       loc = i;
-
-                       mgm->members_count = cpu_to_be32(--members_count |
-                                                        (MLX4_PROT_ETH << 30));
-                       mgm->qp[loc] = mgm->qp[i - 1];
-                       mgm->qp[i - 1] = 0;
+                       if (found) {
+                               /* A duplicate, no need to change the MGM,
+                                * only update the duplicates list
+                                */
+                               list_del(&dqp->list);
+                               kfree(dqp);
+                       } else {
+                               int loc = -1;
+
+                               err = mlx4_READ_ENTRY(dev,
+                                                     entry->index,
+                                                     mailbox);
+                                       if (err)
+                                               goto out_mailbox;
+                               members_count =
+                                       be32_to_cpu(mgm->members_count) &
+                                       0xffffff;
+                               if (!members_count) {
+                                       mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
+                                                 qpn, entry->index);
+                                       list_del(&entry->list);
+                                       kfree(entry);
+                                       continue;
+                               }
 
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                               if (err)
+                               for (i = 0; i < members_count; ++i)
+                                       if ((be32_to_cpu(mgm->qp[i]) &
+                                            MGM_QPN_MASK) == qpn) {
+                                               loc = i;
+                                               break;
+                                       }
+
+                               if (loc < 0) {
+                                       mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
+                                                qpn, entry->index);
+                                       err = -EINVAL;
                                        goto out_mailbox;
-               }
+                               }
 
+                               /* Copy the last QP in this MGM
+                                * over removed QP
+                                */
+                               mgm->qp[loc] = mgm->qp[members_count - 1];
+                               mgm->qp[members_count - 1] = 0;
+                               mgm->members_count =
+                                       cpu_to_be32(--members_count |
+                                                   (MLX4_PROT_ETH << 30));
+
+                               err = mlx4_WRITE_ENTRY(dev,
+                                                      entry->index,
+                                                      mailbox);
+                                       if (err)
+                                               goto out_mailbox;
+                       }
+               }
        }
 
 out_mailbox:
@@ -1062,7 +1154,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        struct mlx4_mgm *mgm;
        u32 members_count;
        int prev, index;
-       int i, loc;
+       int i, loc = -1;
        int err;
        u8 port = gid[5];
        bool removed_entry = false;
@@ -1085,15 +1177,20 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                goto out;
        }
 
-       /* if this pq is also a promisc qp, it shouldn't be removed */
+       /* If this QP is also a promisc QP, it shouldn't be removed only if
+        * at least one none promisc QP is also attached to this MCG
+        */
        if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, port, steer, index, qp->qpn))
-               goto out;
+           check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
+           !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
+                       goto out;
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (loc = -1, i = 0; i < members_count; ++i)
-               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
+       for (i = 0; i < members_count; ++i)
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
                        loc = i;
+                       break;
+               }
 
        if (loc == -1) {
                mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
@@ -1101,15 +1198,15 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                goto out;
        }
 
-
+       /* copy the last QP in this MGM over removed QP */
+       mgm->qp[loc] = mgm->qp[members_count - 1];
+       mgm->qp[members_count - 1] = 0;
        mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
-       mgm->qp[loc]       = mgm->qp[i - 1];
-       mgm->qp[i - 1]     = 0;
 
        if (prot == MLX4_PROT_ETH)
                removed_entry = can_remove_steering_entry(dev, port, steer,
                                                                index, qp->qpn);
-       if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+       if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
        }
index 1d8af7336807b649feb5a5cdb8c4c32a303d31b2..13fbcd03c3e414674ac37b4737d4fedc3998b264 100644 (file)
 
 #define INIT_HCA_TPT_MW_ENABLE          (1 << 7)
 
-#define MLX4_NUM_UP            8
-#define MLX4_NUM_TC            8
-#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
-#define MLX4_RATELIMIT_DEFAULT 0xffff
-
 struct mlx4_set_port_prio2tc_context {
        u8 prio2tc[4];
 };
index d72a5a894fc6aef71315c098141f326d1b2dd55d..2b19dd1f2c5d76da0972401734c584d678261a6a 100644 (file)
@@ -154,8 +154,6 @@ enum {
 #define MLX4_EN_TX_POLL_MODER  16
 #define MLX4_EN_TX_POLL_TIMEOUT        (HZ / 4)
 
-#define ETH_LLC_SNAP_SIZE      8
-
 #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
 #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
@@ -535,7 +533,7 @@ struct mlx4_en_priv {
        int registered;
        int allocated;
        int stride;
-       unsigned char prev_mac[ETH_ALEN + 2];
+       unsigned char current_mac[ETH_ALEN + 2];
        int mac_index;
        unsigned max_mtu;
        int base_qpn;
index 7ab97174886d20fd7c6b02d81c58fc3961790469..9ba0c1ca10d59ffe1a2be56b67439f0013b8ad59 100644 (file)
@@ -244,10 +244,16 @@ EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
 
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
+       struct mlx4_port_info *info;
+       struct mlx4_mac_table *table;
        int index;
 
+       if (port < 1 || port > dev->caps.num_ports) {
+               mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
+               return;
+       }
+       info = &mlx4_priv(dev)->port[port];
+       table = &info->mac_table;
        mutex_lock(&table->mutex);
        index = find_index(dev, table, mac);
 
@@ -1051,14 +1057,26 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 
        for (i = 0; i < MLX4_NUM_TC; i++) {
                struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
-               u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
-                       MLX4_RATELIMIT_DEFAULT;
+               u16 r;
+
+               if (ratelimit && ratelimit[i]) {
+                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
+                               r = ratelimit[i];
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_100M_UNITS);
+                       } else {
+                               r = ratelimit[i]/10;
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_1G_UNITS);
+                       }
+                       tc->max_bw_value = htons(r);
+               } else {
+                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
+                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
+               }
 
                tc->pg = htons(pg[i]);
                tc->bw_precentage = htons(tc_tx_bw[i]);
-
-               tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
-               tc->max_bw_value = htons(r);
        }
 
        in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
index be618b9e874f6a12c16d5876fc7b5302b73b4a4f..16039d1497b84a68efb84aa8f66aa0fd250418f4 100644 (file)
@@ -39,8 +39,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 60
-#define QLCNIC_LINUX_VERSIONID  "5.3.60"
+#define _QLCNIC_LINUX_SUBVERSION 61
+#define QLCNIC_LINUX_VERSIONID  "5.3.61"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
index 561cb11ca58c5818dd1716c84ffff7eb9f000f8c..a72bcddf160ac2eb65f03438615d29aaa40e434e 100644 (file)
@@ -926,7 +926,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
        }
 }
 
-static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct dcb_app app = {
@@ -935,7 +935,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
                             };
 
        if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
-               return 0;
+               return -EINVAL;
 
        return dcb_getapp(netdev, &app);
 }
index 4fc186713b660c8d823744f842cf3551dd660092..0fdbcc8319f75e2f01a861c2fe727c67c1a493a1 100644 (file)
@@ -427,16 +427,17 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 }
 
 static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
-                       struct net_device *netdev, int idx)
+                       struct net_device *netdev,
+                       struct net_device *filter_dev, int idx)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        if (!adapter->fdb_mac_learn)
-               return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+               return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
 
        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
            qlcnic_sriov_check(adapter))
-               idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+               idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
 
        return idx;
 }
@@ -2980,17 +2981,43 @@ static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
        }
 }
 
-static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct net_device *netdev = adapter->netdev;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
 
        if (!netdev || !netif_running(netdev))
                return;
 
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               if (!rds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+                            ring, readl(rds_ring->crb_rcv_producer),
+                            rds_ring->producer, rds_ring->num_desc);
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &(recv_ctx->sds_rings[ring]);
+               if (!sds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+                           ring, readl(sds_ring->crb_sts_consumer),
+                           sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+                           sds_ring->num_desc);
+       }
+
        for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
                tx_ring = &adapter->tx_ring[ring];
+               if (!tx_ring)
+                       continue;
                netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
                            ring, tx_ring->ctx_id);
                netdev_info(netdev,
@@ -3013,9 +3040,10 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
                            tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
 
-               if (netif_msg_tx_done(adapter->ahw))
+               if (netif_msg_tx_err(adapter->ahw))
                        dump_tx_ring_desc(tx_ring);
        }
+
 }
 
 static void qlcnic_tx_timeout(struct net_device *netdev)
@@ -3025,16 +3053,18 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
 
-       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
-               netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+       qlcnic_dump_rings(adapter);
+
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+           netif_msg_tx_err(adapter->ahw)) {
+               netdev_err(netdev, "Tx timeout, reset the adapter.\n");
                if (qlcnic_82xx_check(adapter))
                        adapter->need_fw_reset = 1;
                else if (qlcnic_83xx_check(adapter))
                        qlcnic_83xx_idc_request_reset(adapter,
                                                      QLCNIC_FORCE_FW_DUMP_KEY);
        } else {
-               netdev_info(netdev, "Tx timeout, reset adapter context.\n");
-               qlcnic_dump_tx_rings(adapter);
+               netdev_err(netdev, "Tx timeout, reset adapter context.\n");
                adapter->ahw->reset_context = 1;
        }
 }
index 61623e9af57424b1298c7db02641d78d6154b751..9887bcb45b8411bc8e4376a40e48b5d004a420af 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/firmware.h>
 #include <linux/pci-aspm.h>
 #include <linux/prefetch.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -627,39 +629,22 @@ enum rtl_tx_desc_bit_0 {
 
 /* 8102e, 8168c and beyond. */
 enum rtl_tx_desc_bit_1 {
+       /* First doubleword. */
+       TD1_GTSENV4     = (1 << 26),            /* Giant Send for IPv4 */
+       TD1_GTSENV6     = (1 << 25),            /* Giant Send for IPv6 */
+#define GTTCPHO_SHIFT                  18
+#define GTTCPHO_MAX                    0x7fU
+
        /* Second doubleword. */
+#define TCPHO_SHIFT                    18
+#define TCPHO_MAX                      0x3ffU
 #define TD1_MSS_SHIFT                  18      /* MSS position (11 bits) */
-       TD1_IP_CS       = (1 << 29),            /* Calculate IP checksum */
+       TD1_IPv6_CS     = (1 << 28),            /* Calculate IPv6 checksum */
+       TD1_IPv4_CS     = (1 << 29),            /* Calculate IPv4 checksum */
        TD1_TCP_CS      = (1 << 30),            /* Calculate TCP/IP checksum */
        TD1_UDP_CS      = (1 << 31),            /* Calculate UDP/IP checksum */
 };
 
-static const struct rtl_tx_desc_info {
-       struct {
-               u32 udp;
-               u32 tcp;
-       } checksum;
-       u16 mss_shift;
-       u16 opts_offset;
-} tx_desc_info [] = {
-       [RTL_TD_0] = {
-               .checksum = {
-                       .udp    = TD0_IP_CS | TD0_UDP_CS,
-                       .tcp    = TD0_IP_CS | TD0_TCP_CS
-               },
-               .mss_shift      = TD0_MSS_SHIFT,
-               .opts_offset    = 0
-       },
-       [RTL_TD_1] = {
-               .checksum = {
-                       .udp    = TD1_IP_CS | TD1_UDP_CS,
-                       .tcp    = TD1_IP_CS | TD1_TCP_CS
-               },
-               .mss_shift      = TD1_MSS_SHIFT,
-               .opts_offset    = 1
-       }
-};
-
 enum rtl_rx_desc_bit {
        /* Rx private */
        PID1            = (1 << 18), /* Protocol ID bit 1/2 */
@@ -783,6 +768,7 @@ struct rtl8169_private {
        unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
        unsigned int (*link_ok)(void __iomem *);
        int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
+       bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
 
        struct {
                DECLARE_BITMAP(flags, RTL_FLAG_MAX);
@@ -5968,32 +5954,179 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
        return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
 }
 
-static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
-                                   struct sk_buff *skb, u32 *opts)
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+                                     struct net_device *dev);
+/* r8169_csum_workaround()
+ * The hw limites the value the transport offset. When the offset is out of the
+ * range, calculate the checksum by sw.
+ */
+static void r8169_csum_workaround(struct rtl8169_private *tp,
+                                 struct sk_buff *skb)
+{
+       if (skb_shinfo(skb)->gso_size) {
+               netdev_features_t features = tp->dev->features;
+               struct sk_buff *segs, *nskb;
+
+               features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+               segs = skb_gso_segment(skb, features);
+               if (IS_ERR(segs) || !segs)
+                       goto drop;
+
+               do {
+                       nskb = segs;
+                       segs = segs->next;
+                       nskb->next = NULL;
+                       rtl8169_start_xmit(nskb, tp->dev);
+               } while (segs);
+
+               dev_kfree_skb(skb);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (skb_checksum_help(skb) < 0)
+                       goto drop;
+
+               rtl8169_start_xmit(skb, tp->dev);
+       } else {
+               struct net_device_stats *stats;
+
+drop:
+               stats = &tp->dev->stats;
+               stats->tx_dropped++;
+               dev_kfree_skb(skb);
+       }
+}
+
+/* msdn_giant_send_check()
+ * According to the document of microsoft, the TCP Pseudo Header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+       int ret;
+
+       ret = skb_cow_head(skb, 0);
+       if (ret)
+               return ret;
+
+       ipv6h = ipv6_hdr(skb);
+       th = tcp_hdr(skb);
+
+       th->check = 0;
+       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+       return ret;
+}
+
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+       __be16 protocol;
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+       else
+               protocol = skb->protocol;
+
+       return protocol;
+}
+
+static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
+                               struct sk_buff *skb, u32 *opts)
 {
-       const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
        u32 mss = skb_shinfo(skb)->gso_size;
-       int offset = info->opts_offset;
 
        if (mss) {
                opts[0] |= TD_LSO;
-               opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+               opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
 
+               if (ip->protocol == IPPROTO_TCP)
+                       opts[0] |= TD0_IP_CS | TD0_TCP_CS;
+               else if (ip->protocol == IPPROTO_UDP)
+                       opts[0] |= TD0_IP_CS | TD0_UDP_CS;
+               else
+                       WARN_ON_ONCE(1);
+       }
+
+       return true;
+}
+
+static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+                               struct sk_buff *skb, u32 *opts)
+{
+       u32 transport_offset = (u32)skb_transport_offset(skb);
+       u32 mss = skb_shinfo(skb)->gso_size;
+
+       if (mss) {
+               if (transport_offset > GTTCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->dev,
+                                  "Invalid transport offset 0x%x for TSO\n",
+                                  transport_offset);
+                       return false;
+               }
+
+               switch (get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       opts[0] |= TD1_GTSENV4;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       if (msdn_giant_send_check(skb))
+                               return false;
+
+                       opts[0] |= TD1_GTSENV6;
+                       break;
+
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
+
+               opts[0] |= transport_offset << GTTCPHO_SHIFT;
+               opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u8 ip_protocol;
+
                if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
                        return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
 
-               if (ip->protocol == IPPROTO_TCP)
-                       opts[offset] |= info->checksum.tcp;
-               else if (ip->protocol == IPPROTO_UDP)
-                       opts[offset] |= info->checksum.udp;
+               if (transport_offset > TCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->dev,
+                                  "Invalid transport offset 0x%x\n",
+                                  transport_offset);
+                       return false;
+               }
+
+               switch (get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       opts[1] |= TD1_IPv4_CS;
+                       ip_protocol = ip_hdr(skb)->protocol;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       opts[1] |= TD1_IPv6_CS;
+                       ip_protocol = ipv6_hdr(skb)->nexthdr;
+                       break;
+
+               default:
+                       ip_protocol = IPPROTO_RAW;
+                       break;
+               }
+
+               if (ip_protocol == IPPROTO_TCP)
+                       opts[1] |= TD1_TCP_CS;
+               else if (ip_protocol == IPPROTO_UDP)
+                       opts[1] |= TD1_UDP_CS;
                else
                        WARN_ON_ONCE(1);
+
+               opts[1] |= transport_offset << TCPHO_SHIFT;
        } else {
                if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
                        return rtl_skb_pad(skb);
        }
+
        return true;
 }
 
@@ -6021,8 +6154,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
        opts[0] = DescOwn;
 
-       if (!rtl8169_tso_csum(tp, skb, opts))
-               goto err_update_stats;
+       if (!tp->tso_csum(tp, skb, opts)) {
+               r8169_csum_workaround(tp, skb);
+               return NETDEV_TX_OK;
+       }
 
        len = skb_headlen(skb);
        mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
@@ -6087,7 +6222,6 @@ err_dma_1:
        rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 err_dma_0:
        dev_kfree_skb_any(skb);
-err_update_stats:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 
@@ -7172,6 +7306,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                /* 8110SCd requires hardware Rx VLAN - disallow toggling */
                dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (tp->txd_version == RTL_TD_0)
+               tp->tso_csum = rtl8169_tso_csum_v1;
+       else if (tp->txd_version == RTL_TD_1) {
+               tp->tso_csum = rtl8169_tso_csum_v2;
+               dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+       } else
+               WARN_ON_ONCE(1);
+
        dev->hw_features |= NETIF_F_RXALL;
        dev->hw_features |= NETIF_F_RXFCS;
 
index 7622213beef167e04a3a837c63c890935a8d703b..67b11c833870ed3a87d585e39e9e4bc156b10ba2 100644 (file)
@@ -1094,20 +1094,16 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
-               for (i = 0; i < mdp->num_rx_ring; i++) {
-                       if (mdp->rx_skbuff[i])
-                               dev_kfree_skb(mdp->rx_skbuff[i]);
-               }
+               for (i = 0; i < mdp->num_rx_ring; i++)
+                       dev_kfree_skb(mdp->rx_skbuff[i]);
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;
 
        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
-               for (i = 0; i < mdp->num_tx_ring; i++) {
-                       if (mdp->tx_skbuff[i])
-                               dev_kfree_skb(mdp->tx_skbuff[i]);
-               }
+               for (i = 0; i < mdp->num_tx_ring; i++)
+                       dev_kfree_skb(mdp->tx_skbuff[i]);
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
@@ -2077,13 +2073,11 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
-               if (mdp->rx_skbuff[i])
-                       dev_kfree_skb(mdp->rx_skbuff[i]);
+               dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < mdp->num_tx_ring; i++) {
-               if (mdp->tx_skbuff[i])
-                       dev_kfree_skb(mdp->tx_skbuff[i]);
+               dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }
 
index b5ed30a3914486c10ebe9e004a3f7aa3d052db04..002d4cdc319fda80a064c7cea199e1bf417d479c 100644 (file)
@@ -755,6 +755,8 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
        { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 #define EF10_OTHER_STAT(ext_name)                              \
        [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(tx_bytes, TX_BYTES),
@@ -798,6 +800,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
        EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
        EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
        EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
@@ -841,7 +845,9 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
                               (1ULL << EF10_STAT_rx_gtjumbo) |         \
                               (1ULL << EF10_STAT_rx_bad_gtjumbo) |     \
                               (1ULL << EF10_STAT_rx_overflow) |        \
-                              (1ULL << EF10_STAT_rx_nodesc_drops))
+                              (1ULL << EF10_STAT_rx_nodesc_drops) |    \
+                              (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
+                              (1ULL << GENERIC_STAT_rx_noskb_drops))
 
 /* These statistics are only provided by the 10G MAC.  For a 10G/40G
  * switchable port we do not expose these because they might not
@@ -951,7 +957,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
                stats[EF10_STAT_rx_bytes_minus_good_bytes];
        efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
                             stats[EF10_STAT_rx_bytes_minus_good_bytes]);
-
+       efx_update_sw_stats(efx, stats);
        return 0;
 }
 
@@ -990,7 +996,9 @@ static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[EF10_STAT_tx_packets];
                core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
                core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+               core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[EF10_STAT_rx_multicast];
                core_stats->rx_length_errors =
                        stats[EF10_STAT_rx_gtjumbo] +
index 1e274045970fa011c6dacb6baeb77db95178b164..4b80c0be6e57f04ed03e54792f5b4e263aa1f7cc 100644 (file)
@@ -2607,6 +2607,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
+        .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {0}                     /* end of list */
 };
 
@@ -2722,6 +2724,17 @@ static void efx_fini_struct(struct efx_nic *efx)
        }
 }
 
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
+{
+       u64 n_rx_nodesc_trunc = 0;
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx)
+               n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+       stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+       stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
 /**************************************************************************
  *
  * PCI interface
index 99032581336f8297e735bfd21a6bb83c57d9ba4e..b41601e052d6f66f5af0ec5ea88260bf0c2fc337 100644 (file)
@@ -199,6 +199,9 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
 int efx_port_dummy_op_int(struct efx_nic *efx);
 void efx_port_dummy_op_void(struct efx_nic *efx);
 
+/* Update the generic software stats in the passed stats array */
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
+
 /* MTD */
 #ifdef CONFIG_SFC_MTD
 int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
index 74739c4b9997e433b69571f99886290abed49098..cad258a787088b44074bbb37c5a0eeb2686eeb7a 100644 (file)
@@ -77,7 +77,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
-       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
 };
@@ -360,6 +359,37 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
        return n;
 }
 
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+       size_t n_stats = 0;
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_tx_queues(channel)) {
+                       n_stats++;
+                       if (strings != NULL) {
+                               snprintf(strings, ETH_GSTRING_LEN,
+                                        "tx-%u.tx_packets",
+                                        channel->tx_queue[0].queue /
+                                        EFX_TXQ_TYPES);
+
+                               strings += ETH_GSTRING_LEN;
+                       }
+               }
+       }
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_rx_queue(channel)) {
+                       n_stats++;
+                       if (strings != NULL) {
+                               snprintf(strings, ETH_GSTRING_LEN,
+                                        "rx-%d.rx_packets", channel->channel);
+                               strings += ETH_GSTRING_LEN;
+                       }
+               }
+       }
+       return n_stats;
+}
+
 static int efx_ethtool_get_sset_count(struct net_device *net_dev,
                                      int string_set)
 {
@@ -368,8 +398,9 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
        switch (string_set) {
        case ETH_SS_STATS:
                return efx->type->describe_stats(efx, NULL) +
-                       EFX_ETHTOOL_SW_STAT_COUNT +
-                       efx_ptp_describe_stats(efx, NULL);
+                      EFX_ETHTOOL_SW_STAT_COUNT +
+                      efx_describe_per_queue_stats(efx, NULL) +
+                      efx_ptp_describe_stats(efx, NULL);
        case ETH_SS_TEST:
                return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
        default:
@@ -391,6 +422,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
                        strlcpy(strings + i * ETH_GSTRING_LEN,
                                efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
                strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+               strings += (efx_describe_per_queue_stats(efx, strings) *
+                           ETH_GSTRING_LEN);
                efx_ptp_describe_stats(efx, strings);
                break;
        case ETH_SS_TEST:
@@ -410,6 +443,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
        const struct efx_sw_stat_desc *stat;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
        int i;
 
        spin_lock_bh(&efx->stats_lock);
@@ -445,6 +479,25 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 
        spin_unlock_bh(&efx->stats_lock);
 
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_tx_queues(channel)) {
+                       *data = 0;
+                       efx_for_each_channel_tx_queue(tx_queue, channel) {
+                               *data += tx_queue->tx_packets;
+                       }
+                       data++;
+               }
+       }
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_rx_queue(channel)) {
+                       *data = 0;
+                       efx_for_each_channel_rx_queue(rx_queue, channel) {
+                               *data += rx_queue->rx_packets;
+                       }
+                       data++;
+               }
+       }
+
        efx_ptp_update_stats(efx, data);
 }
 
index fae25a41864797c68a7e7cedcf1607a6f49eb741..157037546d3067c4fe79c7f38d981cee235aa76a 100644 (file)
          hw_name ## _ ## offset }
 #define FALCON_OTHER_STAT(ext_name)                                    \
        [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
        FALCON_DMA_STAT(tx_bytes, XgTxOctets),
@@ -191,6 +193,8 @@ static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
        FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
        FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
        FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
 };
 static const unsigned long falcon_stat_mask[] = {
        [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
@@ -2574,6 +2578,7 @@ static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                                     stats[FALCON_STAT_rx_bytes] -
                                     stats[FALCON_STAT_rx_good_bytes] -
                                     stats[FALCON_STAT_rx_control] * 64);
+               efx_update_sw_stats(efx, stats);
        }
 
        if (full_stats)
@@ -2584,7 +2589,9 @@ static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
                core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
                core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+               core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[FALCON_STAT_rx_multicast];
                core_stats->rx_length_errors =
                        stats[FALCON_STAT_rx_gtjumbo] +
index e5fc4e1574b53f977e8509652aeb67e7404ca757..fb19b70eac0118b6bb1946cf9796d3721baddfe0 100644 (file)
@@ -183,6 +183,8 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
                        result |= SUPPORTED_1000baseKX_Full;
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        result |= SUPPORTED_10000baseKX4_Full;
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       result |= SUPPORTED_40000baseKR4_Full;
                break;
 
        case MC_CMD_MEDIA_XFP:
@@ -190,6 +192,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
                result |= SUPPORTED_FIBRE;
                break;
 
+       case MC_CMD_MEDIA_QSFP_PLUS:
+               result |= SUPPORTED_FIBRE;
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       result |= SUPPORTED_40000baseCR4_Full;
+               break;
+
        case MC_CMD_MEDIA_BASE_T:
                result |= SUPPORTED_TP;
                if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
@@ -237,6 +245,8 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
        if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+       if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+               result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
        if (cap & SUPPORTED_Pause)
                result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
        if (cap & SUPPORTED_Asym_Pause)
@@ -285,6 +295,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
 
        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
+       case MC_CMD_MEDIA_QSFP_PLUS:
                return PORT_FIBRE;
 
        case MC_CMD_MEDIA_BASE_T:
index 5bdae8ed7c5734fde156d8c7d6317d4d839bfc5d..fb2e3bfeb2c2eea3a9a87e128730126cab620445 100644 (file)
@@ -249,6 +249,8 @@ struct efx_tx_queue {
        unsigned int tso_packets;
        unsigned int pushes;
        unsigned int pio_packets;
+       /* Statistics to supplement MAC stats */
+       unsigned long tx_packets;
 
        /* Members shared between paths and sometimes updated */
        unsigned int empty_read_count ____cacheline_aligned_in_smp;
@@ -358,6 +360,8 @@ struct efx_rx_queue {
        unsigned int recycle_count;
        struct timer_list slow_fill;
        unsigned int slow_fill_count;
+       /* Statistics to supplement MAC stats */
+       unsigned long rx_packets;
 };
 
 enum efx_sync_events_state {
@@ -777,6 +781,7 @@ struct vfdi_status;
  *     interrupt has occurred.
  * @stats_lock: Statistics update lock. Must be held when calling
  *     efx_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -930,6 +935,7 @@ struct efx_nic {
        spinlock_t biu_lock;
        int last_irq_cpu;
        spinlock_t stats_lock;
+       atomic_t n_rx_noskb_drops;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
index d3ad8ed8d901a93eb4e0908208949b9d7e7a0ba8..60f85149fc4ca8f4531e2f94b6d3209c9c17e5f4 100644 (file)
@@ -135,6 +135,13 @@ enum {
 /* Size and alignment of buffer table entries (same) */
 #define EFX_BUF_SIZE   EFX_PAGE_SIZE
 
+/* NIC-generic software stats */
+enum {
+       GENERIC_STAT_rx_noskb_drops,
+       GENERIC_STAT_rx_nodesc_trunc,
+       GENERIC_STAT_COUNT
+};
+
 /**
  * struct falcon_board_type - board operations and type information
  * @id: Board type id, as found in NVRAM
@@ -205,7 +212,7 @@ static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
 }
 
 enum {
-       FALCON_STAT_tx_bytes,
+       FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
        FALCON_STAT_tx_packets,
        FALCON_STAT_tx_pause,
        FALCON_STAT_tx_control,
@@ -290,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 }
 
 enum {
-       SIENA_STAT_tx_bytes,
+       SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
        SIENA_STAT_tx_good_bytes,
        SIENA_STAT_tx_bad_bytes,
        SIENA_STAT_tx_packets,
@@ -361,7 +368,7 @@ struct siena_nic_data {
 };
 
 enum {
-       EF10_STAT_tx_bytes,
+       EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
        EF10_STAT_tx_packets,
        EF10_STAT_tx_pause,
        EF10_STAT_tx_control,
index 48588ddf81b0aaacd2d233f541dedee4fd31a9c5..a7bb63a7a5217e64672bd1a26fcbe3f8c48706e6 100644 (file)
@@ -480,8 +480,10 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
-       if (unlikely(skb == NULL))
+       if (unlikely(skb == NULL)) {
+               atomic_inc(&efx->n_rx_noskb_drops);
                return NULL;
+       }
 
        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
 
@@ -528,6 +530,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
 
+       rx_queue->rx_packets++;
+
        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;
 
index 50ffefed492c1129f3fdf4d2011f0036b2ee4078..ae696855f21acafbe4f38d55678f70722da5c66e 100644 (file)
@@ -424,6 +424,8 @@ static void siena_remove_nic(struct efx_nic *efx)
        { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 #define SIENA_OTHER_STAT(ext_name)                             \
        [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
        SIENA_DMA_STAT(tx_bytes, TX_BYTES),
@@ -483,6 +485,8 @@ static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
        SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
        SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
 };
 static const unsigned long siena_stat_mask[] = {
        [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
@@ -528,6 +532,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
        efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
                             stats[SIENA_STAT_rx_bytes] -
                             stats[SIENA_STAT_rx_bad_bytes]);
+       efx_update_sw_stats(efx, stats);
        return 0;
 }
 
@@ -554,7 +559,9 @@ static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
                core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
                core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+               core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[SIENA_STAT_rx_multicast];
                core_stats->collisions = stats[SIENA_STAT_tx_collision];
                core_stats->rx_length_errors =
index ede8dcca0ff3516c33a2aee884952b7514924676..283e5f87b09f668729c44a6b67ff1845e182f9f5 100644 (file)
@@ -452,6 +452,8 @@ finish_packet:
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       tx_queue->tx_packets++;
+
        efx_tx_maybe_stop_queue(tx_queue);
 
        return NETDEV_TX_OK;
@@ -1245,6 +1247,8 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 
        ++tx_queue->tso_packets;
 
+       ++tx_queue->tx_packets;
+
        return 0;
 }
 
index 6072f093e6b46618c0724f6a7ed1436d7f3c50f8..7bea17c41dc9824c120330f5b5bd0902e68afe55 100644 (file)
@@ -2258,7 +2258,6 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
                case IF_PORT_100BASEFX: /* 100BaseFx */
                        /* These Modes are not supported (are they?)*/
                        return -EOPNOTSUPP;
-                       break;
 
                default:
                        return -EINVAL;
index 79606f47a08e0989396203e5ba54485dba33a04f..db8ffde491b58f1c1215cf9beaa21d239b442d5f 100644 (file)
@@ -2584,7 +2584,6 @@ static int niu_determine_phy_disposition(struct niu *np)
                                break;
                        default:
                                return -EINVAL;
-                               break;
                        }
                        phy_addr_off = niu_atca_port_num[np->port];
                        break;
index 53150c25a96bd455f58dc30ca3c592b6a598461e..1769700a6070bb238c9895d38c2d379d67375504 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_TI
        bool "Texas Instruments (TI) devices"
        default y
-       depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
+       depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE))
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
 
 config TI_DAVINCI_MDIO
        tristate "TI DaVinci MDIO Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE )
        select PHYLIB
        ---help---
          This driver supports TI's DaVinci MDIO module.
index 7399a52f7c260aa7a037cbe134b35ca82b6cf840..3809f4ec28202db36e2e91bf15d5ee73633e1294 100644 (file)
@@ -67,42 +67,42 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 #define CPMAC_RX_CONTROL               0x0014
 #define CPMAC_RX_TEARDOWN              0x0018
 #define CPMAC_MBP                      0x0100
-# define MBP_RXPASSCRC                 0x40000000
-# define MBP_RXQOS                     0x20000000
-# define MBP_RXNOCHAIN                 0x10000000
-# define MBP_RXCMF                     0x01000000
-# define MBP_RXSHORT                   0x00800000
-# define MBP_RXCEF                     0x00400000
-# define MBP_RXPROMISC                 0x00200000
-# define MBP_PROMISCCHAN(channel)      (((channel) & 0x7) << 16)
-# define MBP_RXBCAST                   0x00002000
-# define MBP_BCASTCHAN(channel)                (((channel) & 0x7) << 8)
-# define MBP_RXMCAST                   0x00000020
-# define MBP_MCASTCHAN(channel)                ((channel) & 0x7)
+#define MBP_RXPASSCRC                  0x40000000
+#define MBP_RXQOS                      0x20000000
+#define MBP_RXNOCHAIN                  0x10000000
+#define MBP_RXCMF                      0x01000000
+#define MBP_RXSHORT                    0x00800000
+#define MBP_RXCEF                      0x00400000
+#define MBP_RXPROMISC                  0x00200000
+#define MBP_PROMISCCHAN(channel)       (((channel) & 0x7) << 16)
+#define MBP_RXBCAST                    0x00002000
+#define MBP_BCASTCHAN(channel)         (((channel) & 0x7) << 8)
+#define MBP_RXMCAST                    0x00000020
+#define MBP_MCASTCHAN(channel)         ((channel) & 0x7)
 #define CPMAC_UNICAST_ENABLE           0x0104
 #define CPMAC_UNICAST_CLEAR            0x0108
 #define CPMAC_MAX_LENGTH               0x010c
 #define CPMAC_BUFFER_OFFSET            0x0110
 #define CPMAC_MAC_CONTROL              0x0160
-# define MAC_TXPTYPE                   0x00000200
-# define MAC_TXPACE                    0x00000040
-# define MAC_MII                       0x00000020
-# define MAC_TXFLOW                    0x00000010
-# define MAC_RXFLOW                    0x00000008
-# define MAC_MTEST                     0x00000004
-# define MAC_LOOPBACK                  0x00000002
-# define MAC_FDX                       0x00000001
+#define MAC_TXPTYPE                    0x00000200
+#define MAC_TXPACE                     0x00000040
+#define MAC_MII                                0x00000020
+#define MAC_TXFLOW                     0x00000010
+#define MAC_RXFLOW                     0x00000008
+#define MAC_MTEST                      0x00000004
+#define MAC_LOOPBACK                   0x00000002
+#define MAC_FDX                                0x00000001
 #define CPMAC_MAC_STATUS               0x0164
-# define MAC_STATUS_QOS                        0x00000004
-# define MAC_STATUS_RXFLOW             0x00000002
-# define MAC_STATUS_TXFLOW             0x00000001
+#define MAC_STATUS_QOS                 0x00000004
+#define MAC_STATUS_RXFLOW              0x00000002
+#define MAC_STATUS_TXFLOW              0x00000001
 #define CPMAC_TX_INT_ENABLE            0x0178
 #define CPMAC_TX_INT_CLEAR             0x017c
 #define CPMAC_MAC_INT_VECTOR           0x0180
-# define MAC_INT_STATUS                        0x00080000
-# define MAC_INT_HOST                  0x00040000
-# define MAC_INT_RX                    0x00020000
-# define MAC_INT_TX                    0x00010000
+#define MAC_INT_STATUS                 0x00080000
+#define MAC_INT_HOST                   0x00040000
+#define MAC_INT_RX                     0x00020000
+#define MAC_INT_TX                     0x00010000
 #define CPMAC_MAC_EOI_VECTOR           0x0184
 #define CPMAC_RX_INT_ENABLE            0x0198
 #define CPMAC_RX_INT_CLEAR             0x019c
@@ -118,8 +118,8 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 #define CPMAC_TX_ACK(channel)          (0x0640 + (channel) * 4)
 #define CPMAC_RX_ACK(channel)          (0x0660 + (channel) * 4)
 #define CPMAC_REG_END                  0x0680
-/*
- * Rx/Tx statistics
+
+/* Rx/Tx statistics
  * TODO: use some of them to fill stats in cpmac_stats()
  */
 #define CPMAC_STATS_RX_GOOD            0x0200
@@ -157,24 +157,24 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 /* MDIO bus */
 #define CPMAC_MDIO_VERSION             0x0000
 #define CPMAC_MDIO_CONTROL             0x0004
-# define MDIOC_IDLE                    0x80000000
-# define MDIOC_ENABLE                  0x40000000
-# define MDIOC_PREAMBLE                        0x00100000
-# define MDIOC_FAULT                   0x00080000
-# define MDIOC_FAULTDETECT             0x00040000
-# define MDIOC_INTTEST                 0x00020000
-# define MDIOC_CLKDIV(div)             ((div) & 0xff)
+#define MDIOC_IDLE                     0x80000000
+#define MDIOC_ENABLE                   0x40000000
+#define MDIOC_PREAMBLE                 0x00100000
+#define MDIOC_FAULT                    0x00080000
+#define MDIOC_FAULTDETECT              0x00040000
+#define MDIOC_INTTEST                  0x00020000
+#define MDIOC_CLKDIV(div)              ((div) & 0xff)
 #define CPMAC_MDIO_ALIVE               0x0008
 #define CPMAC_MDIO_LINK                        0x000c
 #define CPMAC_MDIO_ACCESS(channel)     (0x0080 + (channel) * 8)
-# define MDIO_BUSY                     0x80000000
-# define MDIO_WRITE                    0x40000000
-# define MDIO_REG(reg)                 (((reg) & 0x1f) << 21)
-# define MDIO_PHY(phy)                 (((phy) & 0x1f) << 16)
-# define MDIO_DATA(data)               ((data) & 0xffff)
+#define MDIO_BUSY                      0x80000000
+#define MDIO_WRITE                     0x40000000
+#define MDIO_REG(reg)                  (((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy)                  (((phy) & 0x1f) << 16)
+#define MDIO_DATA(data)                        ((data) & 0xffff)
 #define CPMAC_MDIO_PHYSEL(channel)     (0x0084 + (channel) * 8)
-# define PHYSEL_LINKSEL                        0x00000040
-# define PHYSEL_LINKINT                        0x00000020
+#define PHYSEL_LINKSEL                 0x00000040
+#define PHYSEL_LINKINT                 0x00000020
 
 struct cpmac_desc {
        u32 hw_next;
@@ -224,12 +224,12 @@ static void cpmac_dump_regs(struct net_device *dev)
 {
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
+
        for (i = 0; i < CPMAC_REG_END; i += 4) {
                if (i % 16 == 0) {
                        if (i)
-                               pr_cont("\n");
-                       printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
-                              priv->regs + i);
+                               printk("\n");
+                       printk("%s: reg[%p]:", dev->name, priv->regs + i);
                }
                printk(" %08x", cpmac_read(priv->regs, i));
        }
@@ -239,7 +239,8 @@ static void cpmac_dump_regs(struct net_device *dev)
 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
 {
        int i;
-       printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
+
+       printk("%s: desc[%p]:", dev->name, desc);
        for (i = 0; i < sizeof(*desc) / 4; i++)
                printk(" %08x", ((u32 *)desc)[i]);
        printk("\n");
@@ -249,6 +250,7 @@ static void cpmac_dump_all_desc(struct net_device *dev)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *dump = priv->rx_head;
+
        do {
                cpmac_dump_desc(dev, dump);
                dump = dump->next;
@@ -258,13 +260,13 @@ static void cpmac_dump_all_desc(struct net_device *dev)
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
        int i;
-       printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+
+       printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
        for (i = 0; i < skb->len; i++) {
                if (i % 16 == 0) {
                        if (i)
-                               pr_cont("\n");
-                       printk(KERN_DEBUG "%s: data[%p]:", dev->name,
-                              skb->data + i);
+                               printk("\n");
+                       printk("%s: data[%p]:", dev->name, skb->data + i);
                }
                printk(" %02x", ((u8 *)skb->data)[i]);
        }
@@ -281,6 +283,7 @@ static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
                    MDIO_PHY(phy_id));
        while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                cpu_relax();
+
        return MDIO_DATA(val);
 }
 
@@ -291,6 +294,7 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
                    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+
        return 0;
 }
 
@@ -300,12 +304,13 @@ static int cpmac_mdio_reset(struct mii_bus *bus)
 
        cpmac_clk = clk_get(&bus->dev, "cpmac");
        if (IS_ERR(cpmac_clk)) {
-               printk(KERN_ERR "unable to get cpmac clock\n");
+               pr_err("unable to get cpmac clock\n");
                return -1;
        }
        ar7_device_reset(AR7_RESET_BIT_MDIO);
        cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
+
        return 0;
 }
 
@@ -331,8 +336,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
                } else {
-                       /*
-                        * cpmac uses some strange mac address hashing
+                       /* cpmac uses some strange mac address hashing
                         * (not crc32)
                         */
                        netdev_for_each_mc_addr(ha, dev) {
@@ -369,8 +373,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
        cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
        if (unlikely(!desc->datalen)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx: spurious interrupt\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx: spurious interrupt\n");
+
                return NULL;
        }
 
@@ -390,15 +394,14 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                                    DMA_FROM_DEVICE);
                desc->hw_data = (u32)desc->data_mapping;
                if (unlikely(netif_msg_pktdata(priv))) {
-                       printk(KERN_DEBUG "%s: received packet:\n",
-                              priv->dev->name);
+                       netdev_dbg(priv->dev, "received packet:\n");
                        cpmac_dump_skb(priv->dev, result);
                }
        } else {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING
-                              "%s: low on skbs, dropping packet\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev,
+                                   "low on skbs, dropping packet\n");
+
                priv->dev->stats.rx_dropped++;
        }
 
@@ -418,8 +421,8 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
        spin_lock(&priv->rx_lock);
        if (unlikely(!priv->rx_head)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx: polling, but no queue\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx: polling, but no queue\n");
+
                spin_unlock(&priv->rx_lock);
                napi_complete(napi);
                return 0;
@@ -432,15 +435,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
                if ((desc->dataflags & CPMAC_EOQ) != 0) {
                        /* The last update to eoq->hw_next didn't happen
-                       * soon enough, and the receiver stopped here.
-                       *Remember this descriptor so we can restart
-                       * the receiver after freeing some space.
-                       */
+                        * soon enough, and the receiver stopped here.
+                        * Remember this descriptor so we can restart
+                        * the receiver after freeing some space.
+                        */
                        if (unlikely(restart)) {
                                if (netif_msg_rx_err(priv))
-                                       printk(KERN_ERR "%s: poll found a"
-                                               " duplicate EOQ: %p and %p\n",
-                                               priv->dev->name, restart, desc);
+                                       netdev_err(priv->dev, "poll found a"
+                                                  " duplicate EOQ: %p and %p\n",
+                                                  restart, desc);
                                goto fatal_error;
                        }
 
@@ -457,25 +460,27 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
        if (desc != priv->rx_head) {
                /* We freed some buffers, but not the whole ring,
-                * add what we did free to the rx list */
+                * add what we did free to the rx list
+                */
                desc->prev->hw_next = (u32)0;
                priv->rx_head->prev->hw_next = priv->rx_head->mapping;
        }
 
        /* Optimization: If we did not actually process an EOQ (perhaps because
         * of quota limits), check to see if the tail of the queue has EOQ set.
-       * We should immediately restart in that case so that the receiver can
-       * restart and run in parallel with more packet processing.
-       * This lets us handle slightly larger bursts before running
-       * out of ring space (assuming dev->weight < ring_size) */
+        * We should immediately restart in that case so that the receiver can
+        * restart and run in parallel with more packet processing.
+        * This lets us handle slightly larger bursts before running
+        * out of ring space (assuming dev->weight < ring_size)
+        */
 
        if (!restart &&
             (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
                    == CPMAC_EOQ &&
             (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
                /* reset EOQ so the poll loop (above) doesn't try to
-               * restart this when it eventually gets to this descriptor.
-               */
+                * restart this when it eventually gets to this descriptor.
+                */
                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
                restart = priv->rx_head;
        }
@@ -484,15 +489,13 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
                priv->dev->stats.rx_errors++;
                priv->dev->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx dma ring overrun\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx dma ring overrun\n");
 
                if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
                        if (netif_msg_drv(priv))
-                               printk(KERN_ERR "%s: cpmac_poll is trying to "
-                                       "restart rx from a descriptor that's "
-                                       "not free: %p\n",
-                                       priv->dev->name, restart);
+                               netdev_err(priv->dev, "cpmac_poll is trying "
+                                       "to restart rx from a descriptor "
+                                       "that's not free: %p\n", restart);
                        goto fatal_error;
                }
 
@@ -502,11 +505,12 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
        priv->rx_head = desc;
        spin_unlock(&priv->rx_lock);
        if (unlikely(netif_msg_rx_status(priv)))
-               printk(KERN_DEBUG "%s: poll processed %d packets\n",
-                      priv->dev->name, received);
+               netdev_dbg(priv->dev, "poll processed %d packets\n", received);
+
        if (processed == 0) {
                /* we ran out of packets to read,
-                * revert to interrupt-driven mode */
+                * revert to interrupt-driven mode
+                */
                napi_complete(napi);
                cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                return 0;
@@ -516,16 +520,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
 fatal_error:
        /* Something went horribly wrong.
-        * Reset hardware to try to recover rather than wedging. */
-
+        * Reset hardware to try to recover rather than wedging.
+        */
        if (netif_msg_drv(priv)) {
-               printk(KERN_ERR "%s: cpmac_poll is confused. "
-                               "Resetting hardware\n", priv->dev->name);
+               netdev_err(priv->dev, "cpmac_poll is confused. "
+                          "Resetting hardware\n");
                cpmac_dump_all_desc(priv->dev);
-               printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
-                       priv->dev->name,
-                       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
-                       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+               netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+                          cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+                          cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
        }
 
        spin_unlock(&priv->rx_lock);
@@ -537,6 +540,7 @@ fatal_error:
        cpmac_hw_stop(priv->dev);
        if (!schedule_work(&priv->reset_work))
                atomic_dec(&priv->reset_pending);
+
        return 0;
 
 }
@@ -560,8 +564,8 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
        desc = &priv->desc_ring[queue];
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: tx dma ring full\n",
-                              dev->name);
+                       netdev_warn(dev, "tx dma ring full\n");
+
                return NETDEV_TX_BUSY;
        }
 
@@ -575,8 +579,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
        desc->datalen = len;
        desc->buflen = len;
        if (unlikely(netif_msg_tx_queued(priv)))
-               printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
-                      skb->len);
+               netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
        if (unlikely(netif_msg_pktdata(priv)))
@@ -602,8 +605,8 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
                                 DMA_TO_DEVICE);
 
                if (unlikely(netif_msg_tx_done(priv)))
-                       printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
-                              desc->skb, desc->skb->len);
+                       netdev_dbg(dev, "sent 0x%p, len=%d\n",
+                                  desc->skb, desc->skb->len);
 
                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
@@ -611,8 +614,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING
-                              "%s: end_xmit: spurious interrupt\n", dev->name);
+                       netdev_warn(dev, "end_xmit: spurious interrupt\n");
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
@@ -687,14 +689,14 @@ static void cpmac_clear_rx(struct net_device *dev)
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        int i;
+
        if (unlikely(!priv->rx_head))
                return;
        desc = priv->rx_head;
        for (i = 0; i < priv->ring_size; i++) {
                if ((desc->dataflags & CPMAC_OWN) == 0) {
                        if (netif_msg_rx_err(priv) && net_ratelimit())
-                               printk(KERN_WARNING "%s: packet dropped\n",
-                                      dev->name);
+                               netdev_warn(dev, "packet dropped\n");
                        if (unlikely(netif_msg_hw(priv)))
                                cpmac_dump_desc(dev, desc);
                        desc->dataflags = CPMAC_OWN;
@@ -710,6 +712,7 @@ static void cpmac_clear_tx(struct net_device *dev)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
        int i;
+
        if (unlikely(!priv->desc_ring))
                return;
        for (i = 0; i < CPMAC_QUEUES; i++) {
@@ -751,16 +754,16 @@ static void cpmac_check_status(struct net_device *dev)
        if (rx_code || tx_code) {
                if (netif_msg_drv(priv) && net_ratelimit()) {
                        /* Can't find any documentation on what these
-                        *error codes actually are. So just log them and hope..
+                        * error codes actually are. So just log them and hope..
                         */
                        if (rx_code)
-                               printk(KERN_WARNING "%s: host error %d on rx "
-                                    "channel %d (macstatus %08x), resetting\n",
-                                    dev->name, rx_code, rx_channel, macstatus);
+                               netdev_warn(dev, "host error %d on rx "
+                                       "channel %d (macstatus %08x), resetting\n",
+                                       rx_code, rx_channel, macstatus);
                        if (tx_code)
-                               printk(KERN_WARNING "%s: host error %d on tx "
-                                    "channel %d (macstatus %08x), resetting\n",
-                                    dev->name, tx_code, tx_channel, macstatus);
+                               netdev_warn(dev, "host error %d on tx "
+                                       "channel %d (macstatus %08x), resetting\n",
+                                       tx_code, tx_channel, macstatus);
                }
 
                netif_tx_stop_all_queues(dev);
@@ -785,8 +788,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
        status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
 
        if (unlikely(netif_msg_intr(priv)))
-               printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
-                      status);
+               netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
 
        if (status & MAC_INT_TX)
                cpmac_end_xmit(dev, (status & 7));
@@ -815,7 +817,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
        dev->stats.tx_errors++;
        spin_unlock(&priv->lock);
        if (netif_msg_tx_err(priv) && net_ratelimit())
-               printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+               netdev_warn(dev, "transmit timeout\n");
 
        atomic_inc(&priv->reset_pending);
        barrier();
@@ -829,6 +831,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
+
        if (!(netif_running(dev)))
                return -EINVAL;
        if (!priv->phy)
@@ -884,6 +887,7 @@ static int cpmac_set_ringparam(struct net_device *dev,
        if (netif_running(dev))
                return -EBUSY;
        priv->ring_size = ring->rx_pending;
+
        return 0;
 }
 
@@ -951,8 +955,8 @@ static int cpmac_open(struct net_device *dev)
        mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
        if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to request registers\n",
-                              dev->name);
+                       netdev_err(dev, "failed to request registers\n");
+
                res = -ENXIO;
                goto fail_reserve;
        }
@@ -960,8 +964,8 @@ static int cpmac_open(struct net_device *dev)
        priv->regs = ioremap(mem->start, resource_size(mem));
        if (!priv->regs) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to remap registers\n",
-                              dev->name);
+                       netdev_err(dev, "failed to remap registers\n");
+
                res = -ENXIO;
                goto fail_remap;
        }
@@ -1003,8 +1007,8 @@ static int cpmac_open(struct net_device *dev)
        res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
        if (res) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to obtain irq\n",
-                              dev->name);
+                       netdev_err(dev, "failed to obtain irq\n");
+
                goto fail_irq;
        }
 
@@ -1077,6 +1081,7 @@ static int cpmac_stop(struct net_device *dev)
        dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
                          (CPMAC_QUEUES + priv->ring_size),
                          priv->desc_ring, priv->dma_ring);
+
        return 0;
 }
 
@@ -1121,7 +1126,7 @@ static int cpmac_probe(struct platform_device *pdev)
 
        if (phy_id == PHY_MAX_ADDR) {
                dev_err(&pdev->dev, "no PHY present, falling back "
-                                       "to switch on MDIO bus 0\n");
+                       "to switch on MDIO bus 0\n");
                strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
                phy_id = pdev->id;
        }
@@ -1137,7 +1142,7 @@ static int cpmac_probe(struct platform_device *pdev)
        mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!mem) {
                rc = -ENODEV;
-               goto fail;
+               goto out;
        }
 
        dev->irq = platform_get_irq_byname(pdev, "irq");
@@ -1162,44 +1167,48 @@ static int cpmac_probe(struct platform_device *pdev)
 
        if (IS_ERR(priv->phy)) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: Could not attach to PHY\n",
-                              dev->name);
+                       dev_err(&pdev->dev, "Could not attach to PHY\n");
+
                rc = PTR_ERR(priv->phy);
-               goto fail;
+               goto out;
        }
 
        rc = register_netdev(dev);
        if (rc) {
-               printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
-                      dev->name);
+               dev_err(&pdev->dev, "Could not register net device\n");
                goto fail;
        }
 
        if (netif_msg_probe(priv)) {
-               printk(KERN_INFO
-                      "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
-                      "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
-                      priv->phy_name, dev->dev_addr);
+               dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
+                        "mac: %pM\n", (void *)mem->start, dev->irq,
+                        priv->phy_name, dev->dev_addr);
        }
+
        return 0;
 
 fail:
        free_netdev(dev);
+out:
        return rc;
 }
 
 static int cpmac_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
+
        unregister_netdev(dev);
        free_netdev(dev);
+
        return 0;
 }
 
 static struct platform_driver cpmac_driver = {
-       .driver.name = "cpmac",
-       .driver.owner = THIS_MODULE,
-       .probe = cpmac_probe,
+       .driver = {
+               .name   = "cpmac",
+               .owner  = THIS_MODULE,
+       },
+       .probe  = cpmac_probe,
        .remove = cpmac_remove,
 };
 
@@ -1221,7 +1230,7 @@ int cpmac_init(void)
        cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
 
        if (!cpmac_mii->priv) {
-               printk(KERN_ERR "Can't ioremap mdio registers\n");
+               pr_err("Can't ioremap mdio registers\n");
                res = -ENXIO;
                goto fail_alloc;
        }
index b988d16cd34e2e940555b3041d4f34530bf86d93..ae6379af5b4dac5bab01b8e9aa7d61699d258f15 100644 (file)
@@ -884,14 +884,16 @@ static int cpsw_set_coalesce(struct net_device *ndev,
        u32 addnl_dvdr = 1;
        u32 coal_intvl = 0;
 
-       if (!coal->rx_coalesce_usecs)
-               return -EINVAL;
-
        coal_intvl = coal->rx_coalesce_usecs;
 
        int_ctrl =  readl(&priv->wr_regs->int_control);
        prescale = priv->bus_freq_mhz * 4;
 
+       if (!coal->rx_coalesce_usecs) {
+               int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+               goto update_return;
+       }
+
        if (coal_intvl < CPSW_CMINTMIN_INTVL)
                coal_intvl = CPSW_CMINTMIN_INTVL;
 
@@ -919,6 +921,8 @@ static int cpsw_set_coalesce(struct net_device *ndev,
        int_ctrl |= CPSW_INTPACEEN;
        int_ctrl &= (~CPSW_INTPRESCALE_MASK);
        int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
        writel(int_ctrl, &priv->wr_regs->int_control);
 
        cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
index 6b56f85951e581826afc152109d0eee4b53dd08d..ab92f67da035f2f5f9aaa8ea4effa87ca83a2a41 100644 (file)
@@ -256,23 +256,21 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
 {
        u16 *seqid;
-       unsigned int offset;
+       unsigned int offset = 0;
        u8 *msgtype, *data = skb->data;
 
-       switch (ptp_class) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (ptp_class & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (ptp_class & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
index 735dc53d4b0163be05eec83c83a4fb8bb4612497..2791f6f2db1178e8f5ce0e0f2a9ecad9f87cffba 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/davinci_emac.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 #include <linux/pinctrl/consumer.h>
 
 /*
@@ -95,6 +96,10 @@ struct davinci_mdio_data {
        struct mii_bus  *bus;
        bool            suspended;
        unsigned long   access_time; /* jiffies */
+       /* Indicates that driver shouldn't modify phy_mask in case
+        * if MDIO bus is registered from DT.
+        */
+       bool            skip_scan;
 };
 
 static void __davinci_mdio_reset(struct davinci_mdio_data *data)
@@ -144,6 +149,9 @@ static int davinci_mdio_reset(struct mii_bus *bus)
        dev_info(data->dev, "davinci mdio revision %d.%d\n",
                 (ver >> 8) & 0xff, ver & 0xff);
 
+       if (data->skip_scan)
+               return 0;
+
        /* get phy mask from the alive register */
        phy_mask = __raw_readl(&data->regs->alive);
        if (phy_mask) {
@@ -369,8 +377,17 @@ static int davinci_mdio_probe(struct platform_device *pdev)
                goto bail_out;
        }
 
-       /* register the mii bus */
-       ret = mdiobus_register(data->bus);
+       /* register the mii bus
+        * Create PHYs from DT only in case if PHY child nodes are explicitly
+        * defined to support backward compatibility with DTs which assume that
+        * Davinci MDIO will always scan the bus for PHYs detection.
+        */
+       if (dev->of_node && of_get_child_count(dev->of_node)) {
+               data->skip_scan = true;
+               ret = of_mdiobus_register(data->bus, dev->of_node);
+       } else {
+               ret = mdiobus_register(data->bus);
+       }
        if (ret)
                goto bail_out;
 
index 62b19be5183d3349433ee5855ad082b1560b5c69..6078342fe3f24de7fe8df95ee806cf28a26f6a6b 100644 (file)
@@ -69,10 +69,6 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
 MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
 MODULE_LICENSE("GPL");
 
-
-/* Define this to enable Link beat monitoring */
-#undef MONITOR
-
 /* Turn on debugging. See Documentation/networking/tlan.txt for details */
 static  int            debug;
 module_param(debug, int, 0);
@@ -107,8 +103,10 @@ static struct board {
        { "Compaq Netelligent 10/100 TX Embedded UTP",
          TLAN_ADAPTER_NONE, 0x83 },
        { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
-       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+       { "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+       { "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
        { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
        { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/E",
@@ -192,9 +190,7 @@ static void tlan_phy_power_up(struct net_device *);
 static void    tlan_phy_reset(struct net_device *);
 static void    tlan_phy_start_link(struct net_device *);
 static void    tlan_phy_finish_auto_neg(struct net_device *);
-#ifdef MONITOR
-static void     tlan_phy_monitor(struct net_device *);
-#endif
+static void     tlan_phy_monitor(unsigned long);
 
 /*
   static int   tlan_phy_nop(struct net_device *);
@@ -337,6 +333,7 @@ static void tlan_stop(struct net_device *dev)
 {
        struct tlan_priv *priv = netdev_priv(dev);
 
+       del_timer_sync(&priv->media_timer);
        tlan_read_and_clear_stats(dev, TLAN_RECORD);
        outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
        /* Reset and power down phy */
@@ -368,8 +365,10 @@ static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
 static int tlan_resume(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
+       int rc = pci_enable_device(pdev);
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (rc)
+               return rc;
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);
        netif_device_attach(dev);
@@ -781,7 +780,43 @@ static const struct net_device_ops tlan_netdev_ops = {
 #endif
 };
 
+static void tlan_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       struct tlan_priv *priv = netdev_priv(dev);
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       if (priv->pci_dev)
+               strlcpy(info->bus_info, pci_name(priv->pci_dev),
+                       sizeof(info->bus_info));
+       else
+               strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+       info->eedump_len = TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom_len(struct net_device *dev)
+{
+       return TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom, u8 *data)
+{
+       int i;
+
+       for (i = 0; i < TLAN_EEPROM_SIZE; i++)
+               if (tlan_ee_read_byte(dev, i, &data[i]))
+                       return -EIO;
 
+       return 0;
+}
+
+static const struct ethtool_ops tlan_ethtool_ops = {
+       .get_drvinfo    = tlan_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_eeprom_len = tlan_get_eeprom_len,
+       .get_eeprom     = tlan_get_eeprom,
+};
 
 /***************************************************************
  *     tlan_init
@@ -830,7 +865,7 @@ static int tlan_init(struct net_device *dev)
                priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
        err = 0;
-       for (i = 0;  i < 6 ; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                err |= tlan_ee_read_byte(dev,
                                         (u8) priv->adapter->addr_ofs + i,
                                         (u8 *) &dev->dev_addr[i]);
@@ -838,12 +873,20 @@ static int tlan_init(struct net_device *dev)
                pr_err("%s: Error reading MAC from eeprom: %d\n",
                       dev->name, err);
        }
-       dev->addr_len = 6;
+       /* Olicom OC-2325/OC-2326 have the address byte-swapped */
+       if (priv->adapter->addr_ofs == 0xf8) {
+               for (i = 0; i < ETH_ALEN; i += 2) {
+                       char tmp = dev->dev_addr[i];
+                       dev->dev_addr[i] = dev->dev_addr[i + 1];
+                       dev->dev_addr[i + 1] = tmp;
+               }
+       }
 
        netif_carrier_off(dev);
 
        /* Device methods */
        dev->netdev_ops = &tlan_netdev_ops;
+       dev->ethtool_ops = &tlan_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        return 0;
@@ -886,6 +929,7 @@ static int tlan_open(struct net_device *dev)
        }
 
        init_timer(&priv->timer);
+       init_timer(&priv->media_timer);
 
        tlan_start(dev);
 
@@ -1156,9 +1200,6 @@ static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 
 static int tlan_close(struct net_device *dev)
 {
-       struct tlan_priv *priv = netdev_priv(dev);
-
-       priv->neg_be_verbose = 0;
        tlan_stop(dev);
 
        free_irq(dev->irq, dev);
@@ -1808,11 +1849,6 @@ static void tlan_timer(unsigned long data)
        priv->timer.function = NULL;
 
        switch (priv->timer_type) {
-#ifdef MONITOR
-       case TLAN_TIMER_LINK_BEAT:
-               tlan_phy_monitor(dev);
-               break;
-#endif
        case TLAN_TIMER_PHY_PDOWN:
                tlan_phy_power_down(dev);
                break;
@@ -1856,8 +1892,6 @@ static void tlan_timer(unsigned long data)
 }
 
 
-
-
 /*****************************************************************************
 ******************************************************************************
 
@@ -2205,7 +2239,9 @@ tlan_reset_adapter(struct net_device *dev)
                }
        }
 
-       if (priv->phy_num == 0)
+       /* don't power down internal PHY if we're going to use it */
+       if (priv->phy_num == 0 ||
+          (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
                data |= TLAN_NET_CFG_PHY_EN;
        tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
@@ -2255,42 +2291,39 @@ tlan_finish_reset(struct net_device *dev)
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
                udelay(1000);
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
-               if ((status & MII_GS_LINK) &&
-                   /* We only support link info on Nat.Sem. PHY's */
-                   (tlphy_id1 == NAT_SEM_ID1) &&
-                   (tlphy_id2 == NAT_SEM_ID2)) {
-                       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
-                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
-
-                       netdev_info(dev,
-                                   "Link active with %s %uMbps %s-Duplex\n",
-                                   !(tlphy_par & TLAN_PHY_AN_EN_STAT)
-                                   ? "forced" : "Autonegotiation enabled,",
-                                   tlphy_par & TLAN_PHY_SPEED_100
-                                   ? 100 : 10,
-                                   tlphy_par & TLAN_PHY_DUPLEX_FULL
-                                   ? "Full" : "Half");
-
-                       if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
-                               netdev_info(dev, "Partner capability:");
-                               for (i = 5; i < 10; i++)
-                                       if (partner & (1 << i))
-                                               pr_cont(" %s", media[i-5]);
-                               pr_cont("\n");
-                       }
-
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
-#ifdef MONITOR
-                       /* We have link beat..for now anyway */
-                       priv->link = 1;
-                       /*Enabling link beat monitoring */
-                       tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
-#endif
-               } else if (status & MII_GS_LINK)  {
-                       netdev_info(dev, "Link active\n");
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
+               if (status & MII_GS_LINK) {
+                       /* We only support link info on Nat.Sem. PHY's */
+                       if ((tlphy_id1 == NAT_SEM_ID1) &&
+                           (tlphy_id2 == NAT_SEM_ID2)) {
+                               tlan_mii_read_reg(dev, phy, MII_AN_LPA,
+                                       &partner);
+                               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
+                                       &tlphy_par);
+
+                               netdev_info(dev,
+                                       "Link active, %s %uMbps %s-Duplex\n",
+                                       !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+                                       ? "forced" : "Autonegotiation enabled,",
+                                       tlphy_par & TLAN_PHY_SPEED_100
+                                       ? 100 : 10,
+                                       tlphy_par & TLAN_PHY_DUPLEX_FULL
+                                       ? "Full" : "Half");
+
+                               if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+                                       netdev_info(dev, "Partner capability:");
+                                       for (i = 5; i < 10; i++)
+                                               if (partner & (1 << i))
+                                                       pr_cont(" %s",
+                                                               media[i-5]);
+                                       pr_cont("\n");
+                               }
+                       } else
+                               netdev_info(dev, "Link active\n");
+                       /* Enabling link beat monitoring */
+                       priv->media_timer.function = tlan_phy_monitor;
+                       priv->media_timer.data = (unsigned long) dev;
+                       priv->media_timer.expires = jiffies + HZ;
+                       add_timer(&priv->media_timer);
                }
        }
 
@@ -2312,6 +2345,7 @@ tlan_finish_reset(struct net_device *dev)
                             dev->base_addr + TLAN_HOST_CMD + 1);
                outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
                outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                netif_carrier_on(dev);
        } else {
                netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
@@ -2494,9 +2528,10 @@ static void tlan_phy_power_down(struct net_device *dev)
        value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
        tlan_mii_sync(dev->base_addr);
        tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
-       if ((priv->phy_num == 0) &&
-           (priv->phy[1] != TLAN_PHY_NONE) &&
-           (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+       if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
+               /* if using internal PHY, the external PHY must be powered on */
+               if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
+                       value = MII_GC_ISOLATE; /* just isolate it from MII */
                tlan_mii_sync(dev->base_addr);
                tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
        }
@@ -2538,6 +2573,7 @@ static void tlan_phy_reset(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             phy;
        u16             value;
+       unsigned long timeout = jiffies + HZ;
 
        phy = priv->phy[priv->phy_num];
 
@@ -2545,9 +2581,13 @@ static void tlan_phy_reset(struct net_device *dev)
        tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK | MII_GC_RESET;
        tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
-       tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
-       while (value & MII_GC_RESET)
+       do {
                tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+               if (time_after(jiffies, timeout)) {
+                       netdev_err(dev, "PHY reset timeout\n");
+                       return;
+               }
+       } while (value & MII_GC_RESET);
 
        /* Wait for 500 ms and initialize.
         * I don't remember why I wait this long.
@@ -2653,7 +2693,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             an_adv;
        u16             an_lpa;
-       u16             data;
        u16             mode;
        u16             phy;
        u16             status;
@@ -2668,13 +2707,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
                /* Wait for 8 sec to give the process
                 * more time.  Perhaps we should fail after a while.
                 */
-               if (!priv->neg_be_verbose++) {
-                       pr_info("Giving autonegotiation more time.\n");
-                       pr_info("Please check that your adapter has\n");
-                       pr_info("been properly connected to a HUB or Switch.\n");
-                       pr_info("Trying to establish link in the background...\n");
-               }
-               tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
+               tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
                return;
        }
 
@@ -2687,13 +2720,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        else if (!(mode & 0x0080) && (mode & 0x0040))
                priv->tlan_full_duplex = true;
 
+       /* switch to internal PHY for 10 Mbps */
        if ((!(mode & 0x0180)) &&
            (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
            (priv->phy_num != 0)) {
                priv->phy_num = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
-                       | TLAN_NET_CFG_PHY_EN;
-               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
                tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
        }
@@ -2717,7 +2748,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
 }
 
-#ifdef MONITOR
 
 /*********************************************************************
  *
@@ -2727,18 +2757,18 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
  *           None
  *
  *     Params:
- *           dev            The device structure of this device.
+ *           data           The device structure of this device.
  *
  *
  *     This function monitors PHY condition by reading the status
- *     register via the MII bus. This can be used to give info
- *     about link changes (up/down), and possible switch to alternate
- *     media.
+ *     register via the MII bus, controls LINK LED and notifies the
+ *     kernel about link state.
  *
  *******************************************************************/
 
-void tlan_phy_monitor(struct net_device *dev)
+static void tlan_phy_monitor(unsigned long data)
 {
+       struct net_device *dev = (struct net_device *) data;
        struct tlan_priv *priv = netdev_priv(dev);
        u16     phy;
        u16     phy_status;
@@ -2750,30 +2780,40 @@ void tlan_phy_monitor(struct net_device *dev)
 
        /* Check if link has been lost */
        if (!(phy_status & MII_GS_LINK)) {
-               if (priv->link) {
-                       priv->link = 0;
+               if (netif_carrier_ok(dev)) {
                        printk(KERN_DEBUG "TLAN: %s has lost link\n",
                               dev->name);
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
                        netif_carrier_off(dev);
-                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
-                       return;
+                       if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
+                               /* power down internal PHY */
+                               u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
+                                          MII_GC_ISOLATE;
+
+                               tlan_mii_sync(dev->base_addr);
+                               tlan_mii_write_reg(dev, priv->phy[0],
+                                                  MII_GEN_CTL, data);
+                               /* set to external PHY */
+                               priv->phy_num = 1;
+                               /* restart autonegotiation */
+                               tlan_set_timer(dev, 4 * HZ / 10,
+                                              TLAN_TIMER_PHY_PDOWN);
+                               return;
+                       }
                }
        }
 
        /* Link restablished? */
-       if ((phy_status & MII_GS_LINK) && !priv->link) {
-               priv->link = 1;
+       if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
                       dev->name);
                netif_carrier_on(dev);
        }
-
-       /* Setup a new monitor */
-       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+       priv->media_timer.expires = jiffies + HZ;
+       add_timer(&priv->media_timer);
 }
 
-#endif /* MONITOR */
-
 
 /*****************************************************************************
 ******************************************************************************
index 2eb33a250788abca1235d3ee93d809b422eb328d..e9928411827e9e58e09cb81cee6dcdb419ba042b 100644 (file)
@@ -195,6 +195,7 @@ struct tlan_priv {
        u32                     timer_set_at;
        u32                     timer_type;
        struct timer_list       timer;
+       struct timer_list       media_timer;
        struct board            *adapter;
        u32                     adapter_rev;
        u32                     aui;
@@ -206,9 +207,7 @@ struct tlan_priv {
        u8                      tlan_rev;
        u8                      tlan_full_duplex;
        spinlock_t              lock;
-       u8                      link;
        struct work_struct                      tlan_tqueue;
-       u8                      neg_be_verbose;
 };
 
 
@@ -219,7 +218,6 @@ struct tlan_priv {
         *
         ****************************************************************/
 
-#define TLAN_TIMER_LINK_BEAT           1
 #define TLAN_TIMER_ACTIVITY            2
 #define TLAN_TIMER_PHY_PDOWN           3
 #define TLAN_TIMER_PHY_PUP             4
@@ -241,6 +239,7 @@ struct tlan_priv {
 #define TLAN_EEPROM_ACK                0
 #define TLAN_EEPROM_STOP       1
 
+#define TLAN_EEPROM_SIZE       256
 
 
 
index 4c70360967c244abb05a2152696a84bcfb534557..69557a26f7498ae261cc232f91ae0f5aeeb283f1 100644 (file)
@@ -2201,8 +2201,8 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
        /* Allocate the device structure.  Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
-       dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
-                              NR_CPUS, 1);
+       dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                              tile_net_setup, NR_CPUS, 1);
        if (!dev) {
                pr_err("alloc_netdev_mqs(%s) failed\n", name);
                return;
index e5a5c5d4ce0c8c8967c7963bf8bff36ee58f113f..88c71212669281efe24ba02a783709b2043f8529 100644 (file)
@@ -2292,7 +2292,8 @@ static struct net_device *tile_net_dev_init(const char *name)
         * tile_net_setup(), and saves "name".  Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
-       dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
+       dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                          tile_net_setup);
        if (!dev) {
                pr_err("alloc_netdev(%s) failed\n", name);
                return NULL;
index d568af1eb4f4b4a49cbe032a39f6b7cf8b8ee3da..0a7f2e77557f63eb8920c0b80c9552ec17e056ec 100644 (file)
@@ -723,13 +723,10 @@ static int gelic_wl_get_scan(struct net_device *netdev,
                /* If a scan in progress, caller should call me again */
                ret = -EAGAIN;
                goto out;
-               break;
-
        case GELIC_WL_SCAN_STAT_INIT:
                /* last scan request failed or never issued */
                ret = -ENODEV;
                goto out;
-               break;
        case GELIC_WL_SCAN_STAT_GOT_LIST:
                /* ok, use current list */
                break;
@@ -1831,25 +1828,18 @@ static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
        switch (sec) {
        case GELIC_EURUS_WPA_SEC_NONE:
                return "NONE";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
                return "WPA_TKIP_TKIP";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
                return "WPA_TKIP_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
                return "WPA_AES_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
                return "WPA2_TKIP_TKIP";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
                return "WPA2_TKIP_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
                return "WPA2_AES_AES";
-               break;
        }
        return "";
 };
index 4ef818a7a6c623719f0507cfc64b56ef3de709d9..8a6e5c2d6f95a3f2f52f3d9b8148c81fffe7c5d3 100644 (file)
@@ -72,7 +72,7 @@ void temac_iow(struct temac_local *lp, int offset, u32 value)
 
 int temac_indirect_busywait(struct temac_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
 
        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
                if (time_before_eq(end, jiffies)) {
index d4abf478e2bbf6ae25f5925f406d27923b2b949c..3b67d60d43787bb1442c2e7ec1771fd047f461fc 100644 (file)
@@ -19,7 +19,7 @@
 /* Wait till MDIO interface is ready to accept a new transaction.*/
 int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
        while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
                 XAE_MDIO_MCR_READY_MASK)) {
                if (time_before_eq(end, jiffies)) {
index 8c4aed3053ebc0a3a3757dcae408f25249f8e630..782bb9373cd817e366bc1ba914659285863bd4e4 100644 (file)
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 
 static int xemaclite_mdio_wait(struct net_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
 
        /* wait for the MDIO interface to not be busy or timeout
           after some time.
index 2aa57270838fb6e67ec6f54106cb21391e7fcd25..6eb849a56da568dcce7f7c423aa01503d9ea36ea 100644 (file)
  *             14 Jun 2005     macro           Use irqreturn_t.
  *             23 Oct 2006     macro           Big-endian host support.
  *             14 Dec 2006     macro           TURBOchannel support.
+ *             01 Jul 2014     macro           Fixes for DMA on 64-bit hosts.
  */
 
 /* Include files */
 
 /* Version information string should be updated prior to each new release!  */
 #define DRV_NAME "defxx"
-#define DRV_VERSION "v1.10"
-#define DRV_RELDATE "2006/12/14"
+#define DRV_VERSION "v1.11"
+#define DRV_RELDATE "2014/07/01"
 
 static char version[] =
        DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -1126,17 +1127,16 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
 
        /* Display virtual and physical addresses if debug driver */
 
-       DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
-                  print_name,
-                  (long)bp->descr_block_virt, bp->descr_block_phys);
-       DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
-       DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
-       DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
-       DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
+       DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
+                  print_name, bp->descr_block_virt, &bp->descr_block_phys);
+       DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
+                  print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
+       DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
+                  print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
+       DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
+                  print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
+       DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
+                  print_name, bp->cons_block_virt, &bp->cons_block_phys);
 
        return DFX_K_SUCCESS;
 }
@@ -2927,21 +2927,35 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
        for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
                for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
                {
-                       struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
+                       struct sk_buff *newskb;
+                       dma_addr_t dma_addr;
+
+                       newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
+                                                   GFP_NOIO);
                        if (!newskb)
                                return -ENOMEM;
-                       bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
-                               ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
                        /*
                         * align to 128 bytes for compatibility with
                         * the old EISA boards.
                         */
 
                        my_skb_align(newskb, 128);
+                       dma_addr = dma_map_single(bp->bus_dev,
+                                                 newskb->data,
+                                                 PI_RCV_DATA_K_SIZE_MAX,
+                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(bp->bus_dev, dma_addr)) {
+                               dev_kfree_skb(newskb);
+                               return -ENOMEM;
+                       }
+                       bp->descr_block_virt->rcv_data[i + j].long_0 =
+                               (u32)(PI_RCV_DESCR_M_SOP |
+                                     ((PI_RCV_DATA_K_SIZE_MAX /
+                                       PI_ALIGN_K_RCV_DATA_BUFF) <<
+                                      PI_RCV_DESCR_V_SEG_LEN));
                        bp->descr_block_virt->rcv_data[i + j].long_1 =
-                               (u32)dma_map_single(bp->bus_dev, newskb->data,
-                                                   NEW_SKB_SIZE,
-                                                   DMA_FROM_DEVICE);
+                               (u32)dma_addr;
+
                        /*
                         * p_rcv_buff_va is only used inside the
                         * kernel so we put the skb pointer here.
@@ -3008,7 +3022,7 @@ static void dfx_rcv_queue_process(
        PI_TYPE_2_CONSUMER      *p_type_2_cons;         /* ptr to rcv/xmt consumer block register */
        char                            *p_buff;                        /* ptr to start of packet receive buffer (FMC descriptor) */
        u32                                     descr, pkt_len;         /* FMC descriptor field and packet length */
-       struct sk_buff          *skb;                           /* pointer to a sk_buff to hold incoming packet data */
+       struct sk_buff          *skb = NULL;                    /* pointer to a sk_buff to hold incoming packet data */
 
        /* Service all consumed LLC receive frames */
 
@@ -3016,7 +3030,7 @@ static void dfx_rcv_queue_process(
        while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
                {
                /* Process any errors */
-
+               dma_addr_t dma_addr;
                int entry;
 
                entry = bp->rcv_xmt_reg.index.rcv_comp;
@@ -3025,6 +3039,11 @@ static void dfx_rcv_queue_process(
 #else
                p_buff = bp->p_rcv_buff_va[entry];
 #endif
+               dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
+               dma_sync_single_for_cpu(bp->bus_dev,
+                                       dma_addr + RCV_BUFF_K_DESCR,
+                                       sizeof(u32),
+                                       DMA_FROM_DEVICE);
                memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
 
                if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
@@ -3046,31 +3065,46 @@ static void dfx_rcv_queue_process(
                                bp->rcv_length_errors++;
                        else{
 #ifdef DYNAMIC_BUFFERS
+                               struct sk_buff *newskb = NULL;
+
                                if (pkt_len > SKBUFF_RX_COPYBREAK) {
-                                       struct sk_buff *newskb;
+                                       dma_addr_t new_dma_addr;
 
-                                       newskb = dev_alloc_skb(NEW_SKB_SIZE);
+                                       newskb = netdev_alloc_skb(bp->dev,
+                                                                 NEW_SKB_SIZE);
                                        if (newskb){
+                                               my_skb_align(newskb, 128);
+                                               new_dma_addr = dma_map_single(
+                                                               bp->bus_dev,
+                                                               newskb->data,
+                                                               PI_RCV_DATA_K_SIZE_MAX,
+                                                               DMA_FROM_DEVICE);
+                                               if (dma_mapping_error(
+                                                               bp->bus_dev,
+                                                               new_dma_addr)) {
+                                                       dev_kfree_skb(newskb);
+                                                       newskb = NULL;
+                                               }
+                                       }
+                                       if (newskb) {
                                                rx_in_place = 1;
 
-                                               my_skb_align(newskb, 128);
                                                skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
                                                dma_unmap_single(bp->bus_dev,
-                                                       bp->descr_block_virt->rcv_data[entry].long_1,
-                                                       NEW_SKB_SIZE,
+                                                       dma_addr,
+                                                       PI_RCV_DATA_K_SIZE_MAX,
                                                        DMA_FROM_DEVICE);
                                                skb_reserve(skb, RCV_BUFF_K_PADDING);
                                                bp->p_rcv_buff_va[entry] = (char *)newskb;
-                                               bp->descr_block_virt->rcv_data[entry].long_1 =
-                                                       (u32)dma_map_single(bp->bus_dev,
-                                                               newskb->data,
-                                                               NEW_SKB_SIZE,
-                                                               DMA_FROM_DEVICE);
-                                       } else
-                                               skb = NULL;
-                               } else
+                                               bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
+                                       }
+                               }
+                               if (!newskb)
 #endif
-                                       skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
+                                       /* Alloc new buffer to pass up,
+                                        * add room for PRH. */
+                                       skb = netdev_alloc_skb(bp->dev,
+                                                              pkt_len + 3);
                                if (skb == NULL)
                                        {
                                        printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
@@ -3080,6 +3114,12 @@ static void dfx_rcv_queue_process(
                                else {
                                        if (!rx_in_place) {
                                                /* Receive buffer allocated, pass receive packet up */
+                                               dma_sync_single_for_cpu(
+                                                       bp->bus_dev,
+                                                       dma_addr +
+                                                       RCV_BUFF_K_PADDING,
+                                                       pkt_len + 3,
+                                                       DMA_FROM_DEVICE);
 
                                                skb_copy_to_linear_data(skb,
                                                               p_buff + RCV_BUFF_K_PADDING,
@@ -3182,6 +3222,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
        u8                      prod;                           /* local transmit producer index */
        PI_XMT_DESCR            *p_xmt_descr;           /* ptr to transmit descriptor block entry */
        XMT_DRIVER_DESCR        *p_xmt_drv_descr;       /* ptr to transmit driver descriptor */
+       dma_addr_t              dma_addr;
        unsigned long           flags;
 
        netif_stop_queue(dev);
@@ -3229,6 +3270,20 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                        }
                }
 
+       /* Write the three PRH bytes immediately before the FC byte */
+
+       skb_push(skb, 3);
+       skb->data[0] = DFX_PRH0_BYTE;   /* these byte values are defined */
+       skb->data[1] = DFX_PRH1_BYTE;   /* in the Motorola FDDI MAC chip */
+       skb->data[2] = DFX_PRH2_BYTE;   /* specification */
+
+       dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(bp->bus_dev, dma_addr)) {
+               skb_pull(skb, 3);
+               return NETDEV_TX_BUSY;
+       }
+
        spin_lock_irqsave(&bp->lock, flags);
 
        /* Get the current producer and the next free xmt data descriptor */
@@ -3249,13 +3304,6 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
 
        p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);     /* also bump producer index */
 
-       /* Write the three PRH bytes immediately before the FC byte */
-
-       skb_push(skb,3);
-       skb->data[0] = DFX_PRH0_BYTE;   /* these byte values are defined */
-       skb->data[1] = DFX_PRH1_BYTE;   /* in the Motorola FDDI MAC chip */
-       skb->data[2] = DFX_PRH2_BYTE;   /* specification */
-
        /*
         * Write the descriptor with buffer info and bump producer
         *
@@ -3284,8 +3332,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
         */
 
        p_xmt_descr->long_0     = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
-       p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
-                                                 skb->len, DMA_TO_DEVICE);
+       p_xmt_descr->long_1 = (u32)dma_addr;
 
        /*
         * Verify that descriptor is actually available
@@ -3448,8 +3495,13 @@ static void dfx_rcv_flush( DFX_board_t *bp )
                {
                        struct sk_buff *skb;
                        skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
-                       if (skb)
+                       if (skb) {
+                               dma_unmap_single(bp->bus_dev,
+                                                bp->descr_block_virt->rcv_data[i+j].long_1,
+                                                PI_RCV_DATA_K_SIZE_MAX,
+                                                DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
+                       }
                        bp->p_rcv_buff_va[i+j] = NULL;
                }
 
index 19a6f64df1984b77d85046895fb39d92e584b1b5..adb63f3f7b4a32b093c82cf29cf222db68c8acd7 100644 (file)
@@ -1693,7 +1693,7 @@ typedef union
 /* Only execute special print call when debug driver was built */
 
 #ifdef DEFXX_DEBUG
-#define DBG_printk(args...) printk(## args)
+#define DBG_printk(args...) printk(args)
 #else
 #define DBG_printk(args...)
 #endif
index 66e2b19ef709ef9180578a3f300b0d029d06805a..c3c4051a089df817b63d382645bdba0affd7f86f 100644 (file)
@@ -596,7 +596,8 @@ static int sixpack_open(struct tty_struct *tty)
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
+       dev = alloc_netdev(sizeof(struct sixpack), "sp%d", NET_NAME_UNKNOWN,
+                          sp_setup);
        if (!dev) {
                err = -ENOMEM;
                goto out;
index 484f77ec2ce1f439d21caad0656fc6d2924b1577..a98c153f371e761f2c396c7fbcc1bdf787f407f3 100644 (file)
@@ -1206,7 +1206,7 @@ static int __init init_baycomepp(void)
                struct net_device *dev;
                
                dev = alloc_netdev(sizeof(struct baycom_state), "bce%d",
-                                  baycom_epp_dev_setup);
+                                  NET_NAME_UNKNOWN, baycom_epp_dev_setup);
 
                if (!dev) {
                        printk(KERN_WARNING "bce%d : out of memory\n", i);
index d50b23cf9ea922e66495c68db0fd3fb830815ec2..c2894e43840e604e75eac9f40bcfb1470e4c47aa 100644 (file)
@@ -501,8 +501,8 @@ static int bpq_new_device(struct net_device *edev)
        struct net_device *ndev;
        struct bpqdev *bpq;
 
-       ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d",
-                          bpq_setup);
+       ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", NET_NAME_UNKNOWN,
+                           bpq_setup);
        if (!ndev)
                return -ENOMEM;
 
index 6636022a1027d6c4a91199b07e1116c3b9929b30..0fad408f24aa137694156290ae35a1da99343b80 100644 (file)
@@ -466,7 +466,7 @@ static int __init setup_adapter(int card_base, int type, int n)
        if (!info)
                goto out;
 
-       info->dev[0] = alloc_netdev(0, "", dev_setup);
+       info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[0]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
@@ -474,7 +474,7 @@ static int __init setup_adapter(int card_base, int type, int n)
                goto out1;
        }
 
-       info->dev[1] = alloc_netdev(0, "", dev_setup);
+       info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[1]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
index 5d78c1d08abd60fcc6bcee5dea5ab178a2a166da..c67a27245072746c3275a3b27a4fd899ebb6f3a4 100644 (file)
@@ -699,7 +699,7 @@ struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
        if (privsize < sizeof(struct hdlcdrv_state))
                privsize = sizeof(struct hdlcdrv_state);
 
-       dev = alloc_netdev(privsize, ifname, hdlcdrv_setup);
+       dev = alloc_netdev(privsize, ifname, NET_NAME_UNKNOWN, hdlcdrv_setup);
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
index 8a6c720a4cc9a86f073cf73bfdc4157820286bfa..f990bb1c3e02ba50c583e6d41d3f8472736a3e37 100644 (file)
@@ -734,7 +734,8 @@ static int mkiss_open(struct tty_struct *tty)
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
+       dev = alloc_netdev(sizeof(struct mkiss), "ax%d", NET_NAME_UNKNOWN,
+                          ax_setup);
        if (!dev) {
                err = -ENOMEM;
                goto out;
index 4bc6ee8e7987796b04a6f1ef5a1a5ffe044ec8af..57be9e0e98a68608fcbafade768bcf397b248e42 100644 (file)
@@ -1515,7 +1515,7 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
        int err;
        struct net_device *dev;
 
-       dev = alloc_netdev(0, name, scc_net_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, scc_net_setup);
        if (!dev) 
                return -ENOMEM;
 
index 81901659cc9ea1126e22f02e427dab1b9a729039..717433cfb81d2c4248539a56516daf6eb09c45ae 100644 (file)
@@ -1147,7 +1147,7 @@ static int __init yam_init_driver(void)
                sprintf(name, "yam%d", i);
                
                dev = alloc_netdev(sizeof(struct yam_port), name,
-                                  yam_setup);
+                                  NET_NAME_UNKNOWN, yam_setup);
                if (!dev) {
                        pr_err("yam: cannot allocate net device\n");
                        err = -ENOMEM;
index 6cc37c15e0bf98341131a8229bd7fbd2567994a8..24441ae832d108dc304372d03ec99c0e6281e4d8 100644 (file)
@@ -170,6 +170,7 @@ struct rndis_device {
 
        enum rndis_device_state state;
        bool link_state;
+       bool link_change;
        atomic_t new_req_id;
 
        spinlock_t request_lock;
@@ -185,7 +186,7 @@ int netvsc_device_remove(struct hv_device *device);
 int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet);
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
-                               unsigned int status);
+                               struct rndis_message *resp);
 int netvsc_recv_callback(struct hv_device *device_obj,
                        struct hv_netvsc_packet *packet,
                        struct ndis_tcp_ip_checksum_info *csum_info);
index 4ed38eaecea805b72349f64b8dede105b5a4fcf4..f13e0acc8a69ea6bd3975ef2e0c76d3fb831cf6f 100644 (file)
@@ -1094,9 +1094,7 @@ close:
        vmbus_close(device->channel);
 
 cleanup:
-
-       if (net_device)
-               kfree(net_device);
+       kfree(net_device);
 
        return ret;
 }
index 4fd71b75e666418ab7063447160d561a294a0966..a9c5eaadc426b8cd93e1cacfb65142d306b22f78 100644 (file)
@@ -579,8 +579,9 @@ drop:
  * netvsc_linkstatus_callback - Link up/down notification
  */
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
-                                      unsigned int status)
+                               struct rndis_message *resp)
 {
+       struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;
@@ -589,7 +590,19 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
        net_device = hv_get_drvdata(device_obj);
        rdev = net_device->extension;
 
-       rdev->link_state = status != 1;
+       switch (indicate->status) {
+       case RNDIS_STATUS_MEDIA_CONNECT:
+               rdev->link_state = false;
+               break;
+       case RNDIS_STATUS_MEDIA_DISCONNECT:
+               rdev->link_state = true;
+               break;
+       case RNDIS_STATUS_NETWORK_CHANGE:
+               rdev->link_change = true;
+               break;
+       default:
+               return;
+       }
 
        net = net_device->ndev;
 
@@ -597,7 +610,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
                return;
 
        ndev_ctx = netdev_priv(net);
-       if (status == 1) {
+       if (!rdev->link_state) {
                schedule_delayed_work(&ndev_ctx->dwork, 0);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
@@ -736,6 +749,14 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
        return err;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netvsc_poll_controller(struct net_device *net)
+{
+       /* As netvsc_start_xmit() works synchronous we don't have to
+        * trigger anything here.
+        */
+}
+#endif
 
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
@@ -751,6 +772,9 @@ static const struct net_device_ops device_ops = {
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
        .ndo_select_queue =             netvsc_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller =          netvsc_poll_controller,
+#endif
 };
 
 /*
@@ -767,7 +791,9 @@ static void netvsc_link_change(struct work_struct *w)
        struct net_device *net;
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
-       bool notify;
+       bool notify, refresh = false;
+       char *argv[] = { "/etc/init.d/network", "restart", NULL };
+       char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 
        rtnl_lock();
 
@@ -782,10 +808,17 @@ static void netvsc_link_change(struct work_struct *w)
        } else {
                netif_carrier_on(net);
                notify = true;
+               if (rdev->link_change) {
+                       rdev->link_change = false;
+                       refresh = true;
+               }
        }
 
        rtnl_unlock();
 
+       if (refresh)
+               call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+
        if (notify)
                netdev_notify_peers(net);
 }
index 99c527adae5bf1ee154b2a02eb0f93a6df32e333..2b86f0b6f6d18adf8b13bdba263a426a4d89aaa6 100644 (file)
@@ -320,25 +320,6 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
        }
 }
 
-static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
-                                            struct rndis_message *resp)
-{
-       struct rndis_indicate_status *indicate =
-                       &resp->msg.indicate_status;
-
-       if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
-               netvsc_linkstatus_callback(
-                       dev->net_dev->dev, 1);
-       } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
-               netvsc_linkstatus_callback(
-                       dev->net_dev->dev, 0);
-       } else {
-               /*
-                * TODO:
-                */
-       }
-}
-
 /*
  * Get the Per-Packet-Info with the specified type
  * return NULL if not found.
@@ -464,7 +445,7 @@ int rndis_filter_receive(struct hv_device *dev,
 
        case RNDIS_MSG_INDICATE:
                /* notification msgs */
-               rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
+               netvsc_linkstatus_callback(dev, rndis_msg);
                break;
        default:
                netdev_err(ndev,
index 3e89beab64fdc87559a2b1b9e27a7e6c6ba2b04c..391a916622a94d0354968ef4c1627c918fcf234d 100644 (file)
@@ -34,6 +34,7 @@ config IEEE802154_AT86RF230
        depends on IEEE802154_DRIVERS && MAC802154
        tristate "AT86RF230/231/233/212 transceiver driver"
        depends on SPI
+       select REGMAP_SPI
        ---help---
          Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
          controller.
@@ -51,3 +52,14 @@ config IEEE802154_MRF24J40
 
          This driver can also be built as a module. To do so, say M here.
          the module will be called 'mrf24j40'.
+
+config IEEE802154_CC2520
+       depends on IEEE802154_DRIVERS && MAC802154
+       tristate "CC2520 transceiver driver"
+       depends on SPI
+       ---help---
+         Say Y here to enable the CC2520 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         the module will be called 'cc2520'.
index abb0c08decb0f89a623921bab13d223f4acb56bd..655cb95e6e247b42764151ed6a5768bd5f58d9b8 100644 (file)
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
 obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
 obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
+obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
index 50899416f66873d562df5cea789d1807b5ee60ab..c9d2a752abd7b176e2f4889d3fc9a9cf4d8fd322 100644 (file)
@@ -19,6 +19,7 @@
  * Written by:
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
  * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ * Alexander Aring <aar@pengutronix.de>
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/at86rf230.h>
+#include <linux/regmap.h>
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 
+#include <net/ieee802154.h>
 #include <net/mac802154.h>
 #include <net/wpan-phy.h>
 
-struct at86rf230_local {
-       struct spi_device *spi;
+struct at86rf230_local;
+/* at86rf2xx chip depend data.
+ * All timings are in us.
+ */
+struct at86rf2xx_chip_data {
+       u16 t_sleep_cycle;
+       u16 t_channel_switch;
+       u16 t_reset_to_off;
+       u16 t_off_to_aack;
+       u16 t_off_to_tx_on;
+       u16 t_frame;
+       u16 t_p_ack;
+       /* short interframe spacing time */
+       u16 t_sifs;
+       /* long interframe spacing time */
+       u16 t_lifs;
+       /* completion timeout for tx in msecs */
+       u16 t_tx_timeout;
+       int rssi_base_val;
 
-       u8 part;
-       u8 vers;
+       int (*set_channel)(struct at86rf230_local *, int, int);
+       int (*get_desense_steps)(struct at86rf230_local *, s32);
+};
 
-       u8 buf[2];
-       struct mutex bmux;
+#define AT86RF2XX_MAX_BUF (127 + 3)
 
-       struct work_struct irqwork;
-       struct completion tx_complete;
+struct at86rf230_state_change {
+       struct at86rf230_local *lp;
+
+       struct spi_message msg;
+       struct spi_transfer trx;
+       u8 buf[AT86RF2XX_MAX_BUF];
+
+       void (*complete)(void *context);
+       u8 from_state;
+       u8 to_state;
+};
+
+struct at86rf230_local {
+       struct spi_device *spi;
 
        struct ieee802154_dev *dev;
+       struct at86rf2xx_chip_data *data;
+       struct regmap *regmap;
 
-       spinlock_t lock;
-       bool irq_busy;
-       bool is_tx;
-       bool tx_aret;
+       struct completion state_complete;
+       struct at86rf230_state_change state;
 
-       int rssi_base_val;
-};
+       struct at86rf230_state_change irq;
 
-static bool is_rf212(struct at86rf230_local *local)
-{
-       return local->part == 7;
-}
+       bool tx_aret;
+       bool is_tx;
+       /* spinlock for is_tx protection */
+       spinlock_t lock;
+       struct completion tx_complete;
+       struct sk_buff *tx_skb;
+       struct at86rf230_state_change tx;
+};
 
 #define        RG_TRX_STATUS   (0x01)
 #define        SR_TRX_STATUS           0x01, 0x1f, 0
@@ -256,344 +289,753 @@ static bool is_rf212(struct at86rf230_local *local)
 #define STATE_BUSY_RX_AACK_NOCLK 0x1E
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
+#define AT86RF2XX_NUMREGS 0x3F
+
 static int
-__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
-               u8 *version)
+at86rf230_async_state_change(struct at86rf230_local *lp,
+                            struct at86rf230_state_change *ctx,
+                            const u8 state, void (*complete)(void *context));
+
+static inline int
+__at86rf230_write(struct at86rf230_local *lp,
+                 unsigned int addr, unsigned int data)
 {
-       u8 data[4];
-       u8 *buf = kmalloc(2, GFP_KERNEL);
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-               .rx_buf = buf,
-       };
-       u8 reg;
-
-       if (!buf)
-               return -ENOMEM;
+       return regmap_write(lp->regmap, addr, data);
+}
 
-       for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
-               buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
-               buf[1] = 0xff;
-               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
-               spi_message_init(&msg);
-               spi_message_add_tail(&xfer, &msg);
+static inline int
+__at86rf230_read(struct at86rf230_local *lp,
+                unsigned int addr, unsigned int *data)
+{
+       return regmap_read(lp->regmap, addr, data);
+}
 
-               status = spi_sync(spi, &msg);
-               dev_vdbg(&spi->dev, "status = %d\n", status);
-               if (msg.status)
-                       status = msg.status;
+static inline int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+                     unsigned int addr, unsigned int mask,
+                     unsigned int shift, unsigned int *data)
+{
+       int rc;
 
-               dev_vdbg(&spi->dev, "status = %d\n", status);
-               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
-               dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+       rc = __at86rf230_read(lp, addr, data);
+       if (rc > 0)
+               *data = (*data & mask) >> shift;
 
-               if (status == 0)
-                       data[reg - RG_PART_NUM] = buf[1];
-               else
-                       break;
+       return rc;
+}
+
+static inline int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+                      unsigned int addr, unsigned int mask,
+                      unsigned int shift, unsigned int data)
+{
+       return regmap_update_bits(lp->regmap, addr, mask, data << shift);
+}
+
+static bool
+at86rf230_reg_writeable(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case RG_TRX_STATE:
+       case RG_TRX_CTRL_0:
+       case RG_TRX_CTRL_1:
+       case RG_PHY_TX_PWR:
+       case RG_PHY_ED_LEVEL:
+       case RG_PHY_CC_CCA:
+       case RG_CCA_THRES:
+       case RG_RX_CTRL:
+       case RG_SFD_VALUE:
+       case RG_TRX_CTRL_2:
+       case RG_ANT_DIV:
+       case RG_IRQ_MASK:
+       case RG_VREG_CTRL:
+       case RG_BATMON:
+       case RG_XOSC_CTRL:
+       case RG_RX_SYN:
+       case RG_XAH_CTRL_1:
+       case RG_FTN_CTRL:
+       case RG_PLL_CF:
+       case RG_PLL_DCU:
+       case RG_SHORT_ADDR_0:
+       case RG_SHORT_ADDR_1:
+       case RG_PAN_ID_0:
+       case RG_PAN_ID_1:
+       case RG_IEEE_ADDR_0:
+       case RG_IEEE_ADDR_1:
+       case RG_IEEE_ADDR_2:
+       case RG_IEEE_ADDR_3:
+       case RG_IEEE_ADDR_4:
+       case RG_IEEE_ADDR_5:
+       case RG_IEEE_ADDR_6:
+       case RG_IEEE_ADDR_7:
+       case RG_XAH_CTRL_0:
+       case RG_CSMA_SEED_0:
+       case RG_CSMA_SEED_1:
+       case RG_CSMA_BE:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool
+at86rf230_reg_readable(struct device *dev, unsigned int reg)
+{
+       bool rc;
+
+       /* all writeable are also readable */
+       rc = at86rf230_reg_writeable(dev, reg);
+       if (rc)
+               return rc;
+
+       /* readonly regs */
+       switch (reg) {
+       case RG_TRX_STATUS:
+       case RG_PHY_RSSI:
+       case RG_IRQ_STATUS:
+       case RG_PART_NUM:
+       case RG_VERSION_NUM:
+       case RG_MAN_ID_1:
+       case RG_MAN_ID_0:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool
+at86rf230_reg_volatile(struct device *dev, unsigned int reg)
+{
+       /* can be changed during runtime */
+       switch (reg) {
+       case RG_TRX_STATUS:
+       case RG_TRX_STATE:
+       case RG_PHY_RSSI:
+       case RG_PHY_ED_LEVEL:
+       case RG_IRQ_STATUS:
+       case RG_VREG_CTRL:
+               return true;
+       default:
+               return false;
        }
+}
 
-       if (status == 0) {
-               *part = data[0];
-               *version = data[1];
-               *man_id = (data[3] << 8) | data[2];
+static bool
+at86rf230_reg_precious(struct device *dev, unsigned int reg)
+{
+       /* don't clear irq line on read */
+       switch (reg) {
+       case RG_IRQ_STATUS:
+               return true;
+       default:
+               return false;
        }
+}
 
-       kfree(buf);
+static struct regmap_config at86rf230_regmap_spi_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .write_flag_mask = CMD_REG | CMD_WRITE,
+       .read_flag_mask = CMD_REG,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = AT86RF2XX_NUMREGS,
+       .writeable_reg = at86rf230_reg_writeable,
+       .readable_reg = at86rf230_reg_readable,
+       .volatile_reg = at86rf230_reg_volatile,
+       .precious_reg = at86rf230_reg_precious,
+};
 
-       return status;
+static void
+at86rf230_async_error_recover(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+
+       at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
 }
 
-static int
-__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+static void
+at86rf230_async_error(struct at86rf230_local *lp,
+                     struct at86rf230_state_change *ctx, int rc)
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-       buf[1] = data;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       return status;
+       dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+
+       at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+                                    at86rf230_async_error_recover);
 }
 
+/* Generic function to get some register value in async mode */
 static int
-__at86rf230_read_subreg(struct at86rf230_local *lp,
-                       u8 addr, u8 mask, int shift, u8 *data)
+at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
+                        struct at86rf230_state_change *ctx,
+                        void (*complete)(void *context))
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-               .rx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
-       buf[1] = 0xff;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       if (status == 0)
-               *data = (buf[1] & mask) >> shift;
-
-       return status;
+       u8 *tx_buf = ctx->buf;
+
+       tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+       ctx->trx.len = 2;
+       ctx->msg.complete = complete;
+       return spi_async(lp->spi, &ctx->msg);
 }
 
-static int
-at86rf230_read_subreg(struct at86rf230_local *lp,
-                     u8 addr, u8 mask, int shift, u8 *data)
+static void
+at86rf230_async_state_assert(void *context)
 {
-       int status;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = ctx->buf;
+       const u8 trx_state = buf[1] & 0x1f;
+
+       /* Assert state change */
+       if (trx_state != ctx->to_state) {
+               /* Special handling if transceiver state is in
+                * STATE_BUSY_RX_AACK and a SHR was detected.
+                */
+               if  (trx_state == STATE_BUSY_RX_AACK) {
+                       /* Undocumented race condition. If we send a state
+                        * change to STATE_RX_AACK_ON the transceiver could
+                        * change his state automatically to STATE_BUSY_RX_AACK
+                        * if a SHR was detected. This is not an error, but we
+                        * can't assert this.
+                        */
+                       if (ctx->to_state == STATE_RX_AACK_ON)
+                               goto done;
+
+                       /* If we change to STATE_TX_ON without forcing and
+                        * transceiver state is STATE_BUSY_RX_AACK, we wait
+                        * 'tFrame + tPAck' receiving time. In this time the
+                        * PDU should be received. If the transceiver is still
+                        * in STATE_BUSY_RX_AACK, we run a force state change
+                        * to STATE_TX_ON. This is a timeout handling, if the
+                        * transceiver stucks in STATE_BUSY_RX_AACK.
+                        */
+                       if (ctx->to_state == STATE_TX_ON) {
+                               at86rf230_async_state_change(lp, ctx,
+                                                            STATE_FORCE_TX_ON,
+                                                            ctx->complete);
+                               return;
+                       }
+               }
+
 
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
-       mutex_unlock(&lp->bmux);
+               dev_warn(&lp->spi->dev, "unexcept state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
+                        ctx->from_state, ctx->to_state, trx_state);
+       }
 
-       return status;
+done:
+       if (ctx->complete)
+               ctx->complete(context);
 }
 
-static int
-at86rf230_write_subreg(struct at86rf230_local *lp,
-                      u8 addr, u8 mask, int shift, u8 data)
+/* Do state change timing delay. */
+static void
+at86rf230_async_state_delay(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       struct at86rf2xx_chip_data *c = lp->data;
+       bool force = false;
+       int rc;
+
+       /* The force state changes are will show as normal states in the
+        * state status subregister. We change the to_state to the
+        * corresponding one and remember if it was a force change, this
+        * differs if we do a state change from STATE_BUSY_RX_AACK.
+        */
+       switch (ctx->to_state) {
+       case STATE_FORCE_TX_ON:
+               ctx->to_state = STATE_TX_ON;
+               force = true;
+               break;
+       case STATE_FORCE_TRX_OFF:
+               ctx->to_state = STATE_TRX_OFF;
+               force = true;
+               break;
+       default:
+               break;
+       }
+
+       switch (ctx->from_state) {
+       case STATE_TRX_OFF:
+               switch (ctx->to_state) {
+               case STATE_RX_AACK_ON:
+                       usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
+                       goto change;
+               case STATE_TX_ON:
+                       usleep_range(c->t_off_to_tx_on,
+                                    c->t_off_to_tx_on + 10);
+                       goto change;
+               default:
+                       break;
+               }
+               break;
+       case STATE_BUSY_RX_AACK:
+               switch (ctx->to_state) {
+               case STATE_TX_ON:
+                       /* Wait for worst case receiving time if we
+                        * didn't make a force change from BUSY_RX_AACK
+                        * to TX_ON.
+                        */
+                       if (!force) {
+                               usleep_range(c->t_frame + c->t_p_ack,
+                                            c->t_frame + c->t_p_ack + 1000);
+                               goto change;
+                       }
+                       break;
+               default:
+                       break;
+               }
+               break;
+       /* Default value, means RESET state */
+       case STATE_P_ON:
+               switch (ctx->to_state) {
+               case STATE_TRX_OFF:
+                       usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
+                       goto change;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       /* Default delay is 1us in the most cases */
+       udelay(1);
+
+change:
+       rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                     at86rf230_async_state_assert);
+       if (rc)
+               dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+}
+
+static void
+at86rf230_async_state_change_start(void *context)
 {
-       int status;
-       u8 val;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       u8 *buf = ctx->buf;
+       const u8 trx_state = buf[1] & 0x1f;
+       int rc;
 
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
-       if (status)
-               goto out;
+       /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
+       if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
+               udelay(1);
+               rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                             at86rf230_async_state_change_start);
+               if (rc)
+                       dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+               return;
+       }
 
-       val &= ~mask;
-       val |= (data << shift) & mask;
+       /* Check if we already are in the state which we change in */
+       if (trx_state == ctx->to_state) {
+               if (ctx->complete)
+                       ctx->complete(context);
+               return;
+       }
 
-       status = __at86rf230_write(lp, addr, val);
-out:
-       mutex_unlock(&lp->bmux);
+       /* Set current state to the context of state change */
+       ctx->from_state = trx_state;
 
-       return status;
+       /* Going into the next step for a state change which do a timing
+        * relevant delay.
+        */
+       buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+       buf[1] = ctx->to_state;
+       ctx->trx.len = 2;
+       ctx->msg.complete = at86rf230_async_state_delay;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc)
+               dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
 }
 
 static int
-at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+at86rf230_async_state_change(struct at86rf230_local *lp,
+                            struct at86rf230_state_change *ctx,
+                            const u8 state, void (*complete)(void *context))
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = len,
-               .tx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
-       buf[0] = CMD_WRITE | CMD_FB;
-       buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
-
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       mutex_unlock(&lp->bmux);
-       return status;
+       /* Initialization for the state change context */
+       ctx->to_state = state;
+       ctx->complete = complete;
+       return at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                       at86rf230_async_state_change_start);
 }
 
+static void
+at86rf230_sync_state_change_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+
+       complete(&lp->state_complete);
+}
+
+/* This function do a sync framework above the async state change.
+ * Some callbacks of the IEEE 802.15.4 driver interface need to be
+ * handled synchronously.
+ */
 static int
-at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_head1 = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = 0,
-               .rx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
+       int rc;
 
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
+       rc = at86rf230_async_state_change(lp, &lp->state, state,
+                                         at86rf230_sync_state_change_complete);
+       if (rc) {
+               at86rf230_async_error(lp, &lp->state, rc);
+               return rc;
+       }
 
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
+       rc = wait_for_completion_timeout(&lp->state_complete,
+                                        msecs_to_jiffies(100));
+       if (!rc)
+               return -ETIMEDOUT;
 
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       return 0;
+}
 
-       xfer_buf.len = *(buf + 1) + 1;
-       *len = buf[1];
+static void
+at86rf230_tx_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
 
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
+       complete(&lp->tx_complete);
+}
 
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head1, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
+static void
+at86rf230_tx_on(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
+
+       rc = at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+                                         at86rf230_tx_complete);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-       status = spi_sync(lp->spi, &msg);
+static void
+at86rf230_tx_trac_error(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
 
-       if (msg.status)
-               status = msg.status;
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                         at86rf230_tx_on);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+static void
+at86rf230_tx_trac_check(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = ctx->buf;
+       const u8 trac = (buf[1] & 0xe0) >> 5;
+       int rc;
 
-       if (status) {
-               if (lqi && (*len > lp->buf[1]))
-                       *lqi = data[lp->buf[1]];
+       /* If trac status is different than zero we need to do a state change
+        * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
+        * state to TX_ON.
+        */
+       if (trac) {
+               rc = at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+                                                 at86rf230_tx_trac_error);
+               if (rc)
+                       at86rf230_async_error(lp, ctx, rc);
+               return;
        }
-       mutex_unlock(&lp->bmux);
 
-       return status;
+       at86rf230_tx_on(context);
 }
 
-static int
-at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+
+static void
+at86rf230_tx_trac_status(void *context)
 {
-       might_sleep();
-       BUG_ON(!level);
-       *level = 0xbe;
-       return 0;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
+
+       rc = at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+                                     at86rf230_tx_trac_check);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
+
+static void
+at86rf230_rx(struct at86rf230_local *lp,
+            const u8 *data, u8 len)
+{
+       u8 lqi;
+       struct sk_buff *skb;
+       u8 rx_local_buf[AT86RF2XX_MAX_BUF];
+
+       if (len < 2)
+               return;
+
+       /* read full frame buffer and invalid lqi value to lowest
+        * indicator if frame was is in a corrupted state.
+        */
+       if (len > IEEE802154_MTU) {
+               lqi = 0;
+               len = IEEE802154_MTU;
+               dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+       } else {
+               lqi = data[len];
+       }
+
+       memcpy(rx_local_buf, data, len);
+       enable_irq(lp->spi->irq);
+
+       skb = alloc_skb(IEEE802154_MTU, GFP_ATOMIC);
+       if (!skb) {
+               dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
+               return;
+       }
+
+       memcpy(skb_put(skb, len), rx_local_buf, len);
+
+       /* We do not put CRC into the frame */
+       skb_trim(skb, len - 2);
+
+       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+}
+
+static void
+at86rf230_rx_read_frame_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = lp->irq.buf;
+       const u8 len = buf[1];
+
+       at86rf230_rx(lp, buf + 2, len);
 }
 
 static int
-at86rf230_state(struct ieee802154_dev *dev, int state)
+at86rf230_rx_read_frame(struct at86rf230_local *lp)
 {
-       struct at86rf230_local *lp = dev->priv;
+       u8 *buf = lp->irq.buf;
+
+       buf[0] = CMD_FB;
+       lp->irq.trx.len = AT86RF2XX_MAX_BUF;
+       lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
+       return spi_async(lp->spi, &lp->irq.msg);
+}
+
+static void
+at86rf230_rx_trac_check(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
        int rc;
-       u8 val;
-       u8 desired_status;
 
-       might_sleep();
+       /* A check on the trac status could be done here. This could be
+        * useful for statistics on why a receive failed. It is not used at
+        * the moment, but it may be timing relevant. The datasheet doesn't
+        * say anything about this, but the programming guide says to do it.
+        */
 
-       if (state == STATE_FORCE_TX_ON)
-               desired_status = STATE_TX_ON;
-       else if (state == STATE_FORCE_TRX_OFF)
-               desired_status = STATE_TRX_OFF;
-       else
-               desired_status = state;
+       rc = at86rf230_rx_read_frame(lp);
+       if (rc) {
+               enable_irq(lp->spi->irq);
+               at86rf230_async_error(lp, ctx, rc);
+       }
+}
+
+static int
+at86rf230_irq_trx_end(struct at86rf230_local *lp)
+{
+       spin_lock(&lp->lock);
+       if (lp->is_tx) {
+               lp->is_tx = 0;
+               spin_unlock(&lp->lock);
+               enable_irq(lp->spi->irq);
+
+               if (lp->tx_aret)
+                       return at86rf230_async_state_change(lp, &lp->irq,
+                                                           STATE_FORCE_TX_ON,
+                                                           at86rf230_tx_trac_status);
+               else
+                       return at86rf230_async_state_change(lp, &lp->irq,
+                                                           STATE_RX_AACK_ON,
+                                                           at86rf230_tx_complete);
+       } else {
+               spin_unlock(&lp->lock);
+               return at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
+                                               at86rf230_rx_trac_check);
+       }
+}
 
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+static void
+at86rf230_irq_status(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = lp->irq.buf;
+       const u8 irq = buf[1];
+       int rc;
+
+       if (irq & IRQ_TRX_END) {
+               rc = at86rf230_irq_trx_end(lp);
                if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
+                       at86rf230_async_error(lp, ctx, rc);
+       } else {
+               enable_irq(lp->spi->irq);
+               dev_err(&lp->spi->dev, "not supported irq %02x received\n",
+                       irq);
+       }
+}
 
-       if (val == desired_status)
-               return 0;
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+       struct at86rf230_local *lp = data;
+       struct at86rf230_state_change *ctx = &lp->irq;
+       u8 *buf = ctx->buf;
+       int rc;
 
-       /* state is equal to phy states */
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
-       if (rc)
-               goto err;
+       disable_irq_nosync(lp->spi->irq);
 
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
-               if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
+       buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
+       ctx->trx.len = 2;
+       ctx->msg.complete = at86rf230_irq_status;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc) {
+               at86rf230_async_error(lp, ctx, rc);
+               return IRQ_NONE;
+       }
 
+       return IRQ_HANDLED;
+}
 
-       if (val == desired_status ||
-           (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
-           (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
-               return 0;
+static void
+at86rf230_write_frame_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       u8 *buf = ctx->buf;
+       int rc;
 
-       pr_err("unexpected state change: %d, asked for %d\n", val, state);
-       return -EBUSY;
+       buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+       buf[1] = STATE_BUSY_TX;
+       ctx->trx.len = 2;
+       ctx->msg.complete = NULL;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-err:
-       pr_err("error: %d\n", rc);
-       return rc;
+static void
+at86rf230_write_frame(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       struct sk_buff *skb = lp->tx_skb;
+       u8 *buf = lp->tx.buf;
+       int rc;
+
+       spin_lock(&lp->lock);
+       lp->is_tx = 1;
+       spin_unlock(&lp->lock);
+
+       buf[0] = CMD_FB | CMD_WRITE;
+       buf[1] = skb->len + 2;
+       memcpy(buf + 2, skb->data, skb->len);
+       lp->tx.trx.len = skb->len + 2;
+       lp->tx.msg.complete = at86rf230_write_frame_complete;
+       rc = spi_async(lp->spi, &lp->tx.msg);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
+
+static void
+at86rf230_xmit_tx_on(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
+
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+                                         at86rf230_write_frame);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
 }
 
 static int
-at86rf230_start(struct ieee802154_dev *dev)
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
 {
        struct at86rf230_local *lp = dev->priv;
-       u8 rc;
+       struct at86rf230_state_change *ctx = &lp->tx;
 
-       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
-       if (rc)
-               return rc;
+       void (*tx_complete)(void *context) = at86rf230_write_frame;
+       int rc;
 
-       rc = at86rf230_state(dev, STATE_TX_ON);
-       if (rc)
+       lp->tx_skb = skb;
+
+       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
+        * are in STATE_TX_ON. The path differs here, so we change
+        * the complete handler.
+        */
+       if (lp->tx_aret)
+               tx_complete = at86rf230_xmit_tx_on;
+
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                         tx_complete);
+       if (rc) {
+               at86rf230_async_error(lp, ctx, rc);
                return rc;
+       }
+       rc = wait_for_completion_interruptible_timeout(&lp->tx_complete,
+                                                      msecs_to_jiffies(lp->data->t_tx_timeout));
+       if (!rc) {
+               at86rf230_async_error(lp, ctx, rc);
+               return -ETIMEDOUT;
+       }
+
+       /* Interframe spacing time, which is PHY dependent.
+        * TODO
+        * Move this handling into the MAC 802.15.4 layer.
+        * This is currently a workaround to avoid fragmentation issues.
+        */
+       if (skb->len > 18)
+               usleep_range(lp->data->t_lifs, lp->data->t_lifs + 10);
+       else
+               usleep_range(lp->data->t_sifs, lp->data->t_sifs + 10);
+
+       return 0;
+}
 
-       return at86rf230_state(dev, STATE_RX_AACK_ON);
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       might_sleep();
+       BUG_ON(!level);
+       *level = 0xbe;
+       return 0;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+       return at86rf230_sync_state_change(dev->priv, STATE_RX_AACK_ON);
 }
 
 static void
 at86rf230_stop(struct ieee802154_dev *dev)
 {
-       at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+       at86rf230_sync_state_change(dev->priv, STATE_FORCE_TRX_OFF);
 }
 
 static int
-at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf23x_set_channel(struct at86rf230_local *lp, int page, int channel)
 {
-       lp->rssi_base_val = -91;
-
        return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
 }
 
@@ -611,10 +1053,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
 
        if (page == 0) {
                rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
-               lp->rssi_base_val = -100;
+               lp->data->rssi_base_val = -100;
        } else {
                rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
-               lp->rssi_base_val = -98;
+               lp->data->rssi_base_val = -98;
        }
        if (rc < 0)
                return rc;
@@ -636,106 +1078,19 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
                return -EINVAL;
        }
 
-       if (is_rf212(lp))
-               rc = at86rf212_set_channel(lp, page, channel);
-       else
-               rc = at86rf230_set_channel(lp, page, channel);
+       rc = lp->data->set_channel(lp, page, channel);
        if (rc < 0)
                return rc;
 
-       msleep(1); /* Wait for PLL */
+       /* Wait for PLL */
+       usleep_range(lp->data->t_channel_switch,
+                    lp->data->t_channel_switch + 10);
        dev->phy->current_channel = channel;
        dev->phy->current_page = page;
 
        return 0;
 }
 
-static int
-at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
-{
-       struct at86rf230_local *lp = dev->priv;
-       int rc;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       if  (lp->irq_busy) {
-               spin_unlock_irqrestore(&lp->lock, flags);
-               return -EBUSY;
-       }
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       might_sleep();
-
-       rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
-       if (rc)
-               goto err;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 1;
-       reinit_completion(&lp->tx_complete);
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
-       if (rc)
-               goto err_rx;
-
-       if (lp->tx_aret) {
-               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
-               if (rc)
-                       goto err_rx;
-       }
-
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
-       if (rc)
-               goto err_rx;
-
-       rc = wait_for_completion_interruptible(&lp->tx_complete);
-       if (rc < 0)
-               goto err_rx;
-
-       return at86rf230_start(dev);
-err_rx:
-       at86rf230_start(dev);
-err:
-       pr_err("error: %d\n", rc);
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       return rc;
-}
-
-static int at86rf230_rx(struct at86rf230_local *lp)
-{
-       u8 len = 128, lqi = 0;
-       struct sk_buff *skb;
-
-       skb = alloc_skb(len, GFP_KERNEL);
-
-       if (!skb)
-               return -ENOMEM;
-
-       if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
-               goto err;
-
-       if (len < 2)
-               goto err;
-
-       skb_trim(skb, len - 2); /* We do not put CRC into the frame */
-
-       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
-
-       dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
-
-       return 0;
-err:
-       pr_debug("received frame is too small\n");
-
-       kfree_skb(skb);
-       return -EINVAL;
-}
-
 static int
 at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
                           struct ieee802154_hw_addr_filt *filt,
@@ -784,7 +1139,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
 }
 
 static int
-at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -803,7 +1158,7 @@ at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
 }
 
 static int
-at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+at86rf230_set_lbt(struct ieee802154_dev *dev, bool on)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -811,7 +1166,7 @@ at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
 }
 
 static int
-at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -819,21 +1174,31 @@ at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
 }
 
 static int
-at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
+{
+       return (level - lp->data->rssi_base_val) * 100 / 207;
+}
+
+static int
+at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
+{
+       return (level - lp->data->rssi_base_val) / 2;
+}
+
+static int
+at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
 {
        struct at86rf230_local *lp = dev->priv;
-       int desens_steps;
 
-       if (level < lp->rssi_base_val || level > 30)
+       if (level < lp->data->rssi_base_val || level > 30)
                return -EINVAL;
 
-       desens_steps = (level - lp->rssi_base_val) * 100 / 207;
-
-       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
+                                     lp->data->get_desense_steps(lp, level));
 }
 
 static int
-at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
                          u8 retries)
 {
        struct at86rf230_local *lp = dev->priv;
@@ -854,7 +1219,7 @@ at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
 }
 
 static int
-at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
 {
        struct at86rf230_local *lp = dev->priv;
        int rc = 0;
@@ -878,110 +1243,84 @@ static struct ieee802154_ops at86rf230_ops = {
        .start = at86rf230_start,
        .stop = at86rf230_stop,
        .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+       .set_txpower = at86rf230_set_txpower,
+       .set_lbt = at86rf230_set_lbt,
+       .set_cca_mode = at86rf230_set_cca_mode,
+       .set_cca_ed_level = at86rf230_set_cca_ed_level,
+       .set_csma_params = at86rf230_set_csma_params,
+       .set_frame_retries = at86rf230_set_frame_retries,
 };
 
-static struct ieee802154_ops at86rf212_ops = {
-       .owner = THIS_MODULE,
-       .xmit = at86rf230_xmit,
-       .ed = at86rf230_ed,
-       .set_channel = at86rf230_channel,
-       .start = at86rf230_start,
-       .stop = at86rf230_stop,
-       .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
-       .set_txpower = at86rf212_set_txpower,
-       .set_lbt = at86rf212_set_lbt,
-       .set_cca_mode = at86rf212_set_cca_mode,
-       .set_cca_ed_level = at86rf212_set_cca_ed_level,
-       .set_csma_params = at86rf212_set_csma_params,
-       .set_frame_retries = at86rf212_set_frame_retries,
+static struct at86rf2xx_chip_data at86rf233_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 11,
+       .t_reset_to_off = 26,
+       .t_off_to_aack = 80,
+       .t_off_to_tx_on = 80,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -91,
+       .set_channel = at86rf23x_set_channel,
+       .get_desense_steps = at86rf23x_get_desens_steps
 };
 
-static void at86rf230_irqwork(struct work_struct *work)
-{
-       struct at86rf230_local *lp =
-               container_of(work, struct at86rf230_local, irqwork);
-       u8 status = 0, val;
-       int rc;
-       unsigned long flags;
-
-       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
-       status |= val;
-
-       status &= ~IRQ_PLL_LOCK; /* ignore */
-       status &= ~IRQ_RX_START; /* ignore */
-       status &= ~IRQ_AMI; /* ignore */
-       status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
-
-       if (status & IRQ_TRX_END) {
-               status &= ~IRQ_TRX_END;
-               spin_lock_irqsave(&lp->lock, flags);
-               if (lp->is_tx) {
-                       lp->is_tx = 0;
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       complete(&lp->tx_complete);
-               } else {
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       at86rf230_rx(lp);
-               }
-       }
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->irq_busy = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-static void at86rf230_irqwork_level(struct work_struct *work)
-{
-       struct at86rf230_local *lp =
-               container_of(work, struct at86rf230_local, irqwork);
-
-       at86rf230_irqwork(work);
-
-       enable_irq(lp->spi->irq);
-}
-
-static irqreturn_t at86rf230_isr(int irq, void *data)
-{
-       struct at86rf230_local *lp = data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->irq_busy = 1;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       schedule_work(&lp->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t at86rf230_isr_level(int irq, void *data)
-{
-       disable_irq_nosync(irq);
+static struct at86rf2xx_chip_data at86rf231_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 24,
+       .t_reset_to_off = 37,
+       .t_off_to_aack = 110,
+       .t_off_to_tx_on = 110,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -91,
+       .set_channel = at86rf23x_set_channel,
+       .get_desense_steps = at86rf23x_get_desens_steps
+};
 
-       return at86rf230_isr(irq, data);
-}
+static struct at86rf2xx_chip_data at86rf212_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 11,
+       .t_reset_to_off = 26,
+       .t_off_to_aack = 200,
+       .t_off_to_tx_on = 200,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -100,
+       .set_channel = at86rf212_set_channel,
+       .get_desense_steps = at86rf212_get_desens_steps
+};
 
 static int at86rf230_hw_init(struct at86rf230_local *lp)
 {
-       int rc, irq_pol, irq_type;
-       u8 dvdd;
+       int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH;
+       unsigned int dvdd;
        u8 csma_seed[2];
 
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+       rc = at86rf230_sync_state_change(lp, STATE_FORCE_TRX_OFF);
        if (rc)
                return rc;
 
        irq_type = irq_get_trigger_type(lp->spi->irq);
-       /* configure irq polarity, defaults to high active */
-       if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+       if (irq_type == IRQ_TYPE_EDGE_FALLING)
                irq_pol = IRQ_ACTIVE_LOW;
-       else
-               irq_pol = IRQ_ACTIVE_HIGH;
 
        rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
        if (rc)
                return rc;
 
+       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+       if (rc)
+               return rc;
+
        rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, IRQ_TRX_END);
        if (rc)
                return rc;
@@ -1004,7 +1343,8 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        if (rc)
                return rc;
        /* Wait the next SLEEP cycle */
-       msleep(100);
+       usleep_range(lp->data->t_sleep_cycle,
+                    lp->data->t_sleep_cycle + 100);
 
        rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
        if (rc)
@@ -1037,18 +1377,111 @@ done:
        return pdata;
 }
 
+static int
+at86rf230_detect_device(struct at86rf230_local *lp)
+{
+       unsigned int part, version, val;
+       u16 man_id = 0;
+       const char *chip;
+       int rc;
+
+       rc = __at86rf230_read(lp, RG_MAN_ID_0, &val);
+       if (rc)
+               return rc;
+       man_id |= val;
+
+       rc = __at86rf230_read(lp, RG_MAN_ID_1, &val);
+       if (rc)
+               return rc;
+       man_id |= (val << 8);
+
+       rc = __at86rf230_read(lp, RG_PART_NUM, &part);
+       if (rc)
+               return rc;
+
+       rc = __at86rf230_read(lp, RG_PART_NUM, &version);
+       if (rc)
+               return rc;
+
+       if (man_id != 0x001f) {
+               dev_err(&lp->spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+                       man_id >> 8, man_id & 0xFF);
+               return -EINVAL;
+       }
+
+       lp->dev->extra_tx_headroom = 0;
+       lp->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+                        IEEE802154_HW_TXPOWER | IEEE802154_HW_CSMA;
+
+       switch (part) {
+       case 2:
+               chip = "at86rf230";
+               rc = -ENOTSUPP;
+               break;
+       case 3:
+               chip = "at86rf231";
+               lp->data = &at86rf231_data;
+               lp->dev->phy->channels_supported[0] = 0x7FFF800;
+               break;
+       case 7:
+               chip = "at86rf212";
+               if (version == 1) {
+                       lp->data = &at86rf212_data;
+                       lp->dev->flags |= IEEE802154_HW_LBT;
+                       lp->dev->phy->channels_supported[0] = 0x00007FF;
+                       lp->dev->phy->channels_supported[2] = 0x00007FF;
+               } else {
+                       rc = -ENOTSUPP;
+               }
+               break;
+       case 11:
+               chip = "at86rf233";
+               lp->data = &at86rf233_data;
+               lp->dev->phy->channels_supported[0] = 0x7FFF800;
+               break;
+       default:
+               chip = "unkown";
+               rc = -ENOTSUPP;
+               break;
+       }
+
+       dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
+
+       return rc;
+}
+
+static void
+at86rf230_setup_spi_messages(struct at86rf230_local *lp)
+{
+       lp->state.lp = lp;
+       spi_message_init(&lp->state.msg);
+       lp->state.msg.context = &lp->state;
+       lp->state.trx.tx_buf = lp->state.buf;
+       lp->state.trx.rx_buf = lp->state.buf;
+       spi_message_add_tail(&lp->state.trx, &lp->state.msg);
+
+       lp->irq.lp = lp;
+       spi_message_init(&lp->irq.msg);
+       lp->irq.msg.context = &lp->irq;
+       lp->irq.trx.tx_buf = lp->irq.buf;
+       lp->irq.trx.rx_buf = lp->irq.buf;
+       spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
+
+       lp->tx.lp = lp;
+       spi_message_init(&lp->tx.msg);
+       lp->tx.msg.context = &lp->tx;
+       lp->tx.trx.tx_buf = lp->tx.buf;
+       lp->tx.trx.rx_buf = lp->tx.buf;
+       spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
+}
+
 static int at86rf230_probe(struct spi_device *spi)
 {
        struct at86rf230_platform_data *pdata;
        struct ieee802154_dev *dev;
        struct at86rf230_local *lp;
-       u16 man_id = 0;
-       u8 part = 0, version = 0, status;
-       irq_handler_t irq_handler;
-       work_func_t irq_worker;
+       unsigned int status;
        int rc, irq_type;
-       const char *chip;
-       struct ieee802154_ops *ops = NULL;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ specified\n");
@@ -1084,107 +1517,60 @@ static int at86rf230_probe(struct spi_device *spi)
                usleep_range(120, 240);
        }
 
-       rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
-       if (rc < 0)
-               return rc;
-
-       if (man_id != 0x001f) {
-               dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
-                       man_id >> 8, man_id & 0xFF);
-               return -EINVAL;
-       }
-
-       switch (part) {
-       case 2:
-               chip = "at86rf230";
-               /* FIXME: should be easy to support; */
-               break;
-       case 3:
-               chip = "at86rf231";
-               ops = &at86rf230_ops;
-               break;
-       case 7:
-               chip = "at86rf212";
-               if (version == 1)
-                       ops = &at86rf212_ops;
-               break;
-       case 11:
-               chip = "at86rf233";
-               ops = &at86rf230_ops;
-               break;
-       default:
-               chip = "UNKNOWN";
-               break;
-       }
-
-       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
-       if (!ops)
-               return -ENOTSUPP;
-
-       dev = ieee802154_alloc_device(sizeof(*lp), ops);
+       dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
        if (!dev)
                return -ENOMEM;
 
        lp = dev->priv;
        lp->dev = dev;
-       lp->part = part;
-       lp->vers = version;
-
        lp->spi = spi;
-
        dev->parent = &spi->dev;
-       dev->extra_tx_headroom = 0;
-       dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
-       irq_type = irq_get_trigger_type(spi->irq);
-       if (!irq_type)
-               irq_type = IRQF_TRIGGER_RISING;
-       if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-               irq_worker = at86rf230_irqwork;
-               irq_handler = at86rf230_isr;
-       } else {
-               irq_worker = at86rf230_irqwork_level;
-               irq_handler = at86rf230_isr_level;
+       lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config);
+       if (IS_ERR(lp->regmap)) {
+               rc = PTR_ERR(lp->regmap);
+               dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+                       rc);
+               goto free_dev;
        }
 
-       mutex_init(&lp->bmux);
-       INIT_WORK(&lp->irqwork, irq_worker);
+       at86rf230_setup_spi_messages(lp);
+
+       rc = at86rf230_detect_device(lp);
+       if (rc < 0)
+               goto free_dev;
+
        spin_lock_init(&lp->lock);
        init_completion(&lp->tx_complete);
+       init_completion(&lp->state_complete);
 
        spi_set_drvdata(spi, lp);
 
-       if (is_rf212(lp)) {
-               dev->phy->channels_supported[0] = 0x00007FF;
-               dev->phy->channels_supported[2] = 0x00007FF;
-       } else {
-               dev->phy->channels_supported[0] = 0x7FFF800;
-       }
-
        rc = at86rf230_hw_init(lp);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        /* Read irq status register to reset irq line */
        rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
+
+       irq_type = irq_get_trigger_type(spi->irq);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
 
-       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
-                             IRQF_SHARED | irq_type,
-                             dev_name(&spi->dev), lp);
+       rc = devm_request_irq(&spi->dev, spi->irq, at86rf230_isr,
+                             IRQF_SHARED | irq_type, dev_name(&spi->dev), lp);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        rc = ieee802154_register_device(lp->dev);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        return rc;
 
-err_hw_init:
-       flush_work(&lp->irqwork);
-       mutex_destroy(&lp->bmux);
+free_dev:
        ieee802154_free_device(lp->dev);
 
        return rc;
@@ -1197,8 +1583,6 @@ static int at86rf230_remove(struct spi_device *spi)
        /* mask all at86rf230 irq's */
        at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
        ieee802154_unregister_device(lp->dev);
-       flush_work(&lp->irqwork);
-       mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
        dev_dbg(&spi->dev, "unregistered at86rf230\n");
 
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
new file mode 100644 (file)
index 0000000..8a5ac7a
--- /dev/null
@@ -0,0 +1,1039 @@
+/* Driver for TI CC2520 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
+ *                   Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
+ *                   P Sowjanya <sowjanyap@cdac.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/cc2520.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_gpio.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+#include <net/ieee802154.h>
+
+#define        SPI_COMMAND_BUFFER      3
+#define        HIGH                    1
+#define        LOW                     0
+#define        STATE_IDLE              0
+#define        RSSI_VALID              0
+#define        RSSI_OFFSET             78
+
+#define        CC2520_RAM_SIZE         640
+#define        CC2520_FIFO_SIZE        128
+
+#define        CC2520RAM_TXFIFO        0x100
+#define        CC2520RAM_RXFIFO        0x180
+#define        CC2520RAM_IEEEADDR      0x3EA
+#define        CC2520RAM_PANID         0x3F2
+#define        CC2520RAM_SHORTADDR     0x3F4
+
+#define        CC2520_FREG_MASK        0x3F
+
+/* status byte values */
+#define        CC2520_STATUS_XOSC32M_STABLE    (1 << 7)
+#define        CC2520_STATUS_RSSI_VALID        (1 << 6)
+#define        CC2520_STATUS_TX_UNDERFLOW      (1 << 3)
+
+/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
+#define        CC2520_MINCHANNEL               11
+#define        CC2520_MAXCHANNEL               26
+#define        CC2520_CHANNEL_SPACING          5
+
+/* command strobes */
+#define        CC2520_CMD_SNOP                 0x00
+#define        CC2520_CMD_IBUFLD               0x02
+#define        CC2520_CMD_SIBUFEX              0x03
+#define        CC2520_CMD_SSAMPLECCA           0x04
+#define        CC2520_CMD_SRES                 0x0f
+#define        CC2520_CMD_MEMORY_MASK          0x0f
+#define        CC2520_CMD_MEMORY_READ          0x10
+#define        CC2520_CMD_MEMORY_WRITE         0x20
+#define        CC2520_CMD_RXBUF                0x30
+#define        CC2520_CMD_RXBUFCP              0x38
+#define        CC2520_CMD_RXBUFMOV             0x32
+#define        CC2520_CMD_TXBUF                0x3A
+#define        CC2520_CMD_TXBUFCP              0x3E
+#define        CC2520_CMD_RANDOM               0x3C
+#define        CC2520_CMD_SXOSCON              0x40
+#define        CC2520_CMD_STXCAL               0x41
+#define        CC2520_CMD_SRXON                0x42
+#define        CC2520_CMD_STXON                0x43
+#define        CC2520_CMD_STXONCCA             0x44
+#define        CC2520_CMD_SRFOFF               0x45
+#define        CC2520_CMD_SXOSCOFF             0x46
+#define        CC2520_CMD_SFLUSHRX             0x47
+#define        CC2520_CMD_SFLUSHTX             0x48
+#define        CC2520_CMD_SACK                 0x49
+#define        CC2520_CMD_SACKPEND             0x4A
+#define        CC2520_CMD_SNACK                0x4B
+#define        CC2520_CMD_SRXMASKBITSET        0x4C
+#define        CC2520_CMD_SRXMASKBITCLR        0x4D
+#define        CC2520_CMD_RXMASKAND            0x4E
+#define        CC2520_CMD_RXMASKOR             0x4F
+#define        CC2520_CMD_MEMCP                0x50
+#define        CC2520_CMD_MEMCPR               0x52
+#define        CC2520_CMD_MEMXCP               0x54
+#define        CC2520_CMD_MEMXWR               0x56
+#define        CC2520_CMD_BCLR                 0x58
+#define        CC2520_CMD_BSET                 0x59
+#define        CC2520_CMD_CTR_UCTR             0x60
+#define        CC2520_CMD_CBCMAC               0x64
+#define        CC2520_CMD_UCBCMAC              0x66
+#define        CC2520_CMD_CCM                  0x68
+#define        CC2520_CMD_UCCM                 0x6A
+#define        CC2520_CMD_ECB                  0x70
+#define        CC2520_CMD_ECBO                 0x72
+#define        CC2520_CMD_ECBX                 0x74
+#define        CC2520_CMD_INC                  0x78
+#define        CC2520_CMD_ABORT                0x7F
+#define        CC2520_CMD_REGISTER_READ        0x80
+#define        CC2520_CMD_REGISTER_WRITE       0xC0
+
+/* status registers */
+#define        CC2520_CHIPID                   0x40
+#define        CC2520_VERSION                  0x42
+#define        CC2520_EXTCLOCK                 0x44
+#define        CC2520_MDMCTRL0                 0x46
+#define        CC2520_MDMCTRL1                 0x47
+#define        CC2520_FREQEST                  0x48
+#define        CC2520_RXCTRL                   0x4A
+#define        CC2520_FSCTRL                   0x4C
+#define        CC2520_FSCAL0                   0x4E
+#define        CC2520_FSCAL1                   0x4F
+#define        CC2520_FSCAL2                   0x50
+#define        CC2520_FSCAL3                   0x51
+#define        CC2520_AGCCTRL0                 0x52
+#define        CC2520_AGCCTRL1                 0x53
+#define        CC2520_AGCCTRL2                 0x54
+#define        CC2520_AGCCTRL3                 0x55
+#define        CC2520_ADCTEST0                 0x56
+#define        CC2520_ADCTEST1                 0x57
+#define        CC2520_ADCTEST2                 0x58
+#define        CC2520_MDMTEST0                 0x5A
+#define        CC2520_MDMTEST1                 0x5B
+#define        CC2520_DACTEST0                 0x5C
+#define        CC2520_DACTEST1                 0x5D
+#define        CC2520_ATEST                    0x5E
+#define        CC2520_DACTEST2                 0x5F
+#define        CC2520_PTEST0                   0x60
+#define        CC2520_PTEST1                   0x61
+#define        CC2520_RESERVED                 0x62
+#define        CC2520_DPUBIST                  0x7A
+#define        CC2520_ACTBIST                  0x7C
+#define        CC2520_RAMBIST                  0x7E
+
+/* frame registers */
+#define        CC2520_FRMFILT0                 0x00
+#define        CC2520_FRMFILT1                 0x01
+#define        CC2520_SRCMATCH                 0x02
+#define        CC2520_SRCSHORTEN0              0x04
+#define        CC2520_SRCSHORTEN1              0x05
+#define        CC2520_SRCSHORTEN2              0x06
+#define        CC2520_SRCEXTEN0                0x08
+#define        CC2520_SRCEXTEN1                0x09
+#define        CC2520_SRCEXTEN2                0x0A
+#define        CC2520_FRMCTRL0                 0x0C
+#define        CC2520_FRMCTRL1                 0x0D
+#define        CC2520_RXENABLE0                0x0E
+#define        CC2520_RXENABLE1                0x0F
+#define        CC2520_EXCFLAG0                 0x10
+#define        CC2520_EXCFLAG1                 0x11
+#define        CC2520_EXCFLAG2                 0x12
+#define        CC2520_EXCMASKA0                0x14
+#define        CC2520_EXCMASKA1                0x15
+#define        CC2520_EXCMASKA2                0x16
+#define        CC2520_EXCMASKB0                0x18
+#define        CC2520_EXCMASKB1                0x19
+#define        CC2520_EXCMASKB2                0x1A
+#define        CC2520_EXCBINDX0                0x1C
+#define        CC2520_EXCBINDX1                0x1D
+#define        CC2520_EXCBINDY0                0x1E
+#define        CC2520_EXCBINDY1                0x1F
+#define        CC2520_GPIOCTRL0                0x20
+#define        CC2520_GPIOCTRL1                0x21
+#define        CC2520_GPIOCTRL2                0x22
+#define        CC2520_GPIOCTRL3                0x23
+#define        CC2520_GPIOCTRL4                0x24
+#define        CC2520_GPIOCTRL5                0x25
+#define        CC2520_GPIOPOLARITY             0x26
+#define        CC2520_GPIOCTRL                 0x28
+#define        CC2520_DPUCON                   0x2A
+#define        CC2520_DPUSTAT                  0x2C
+#define        CC2520_FREQCTRL                 0x2E
+#define        CC2520_FREQTUNE                 0x2F
+#define        CC2520_TXPOWER                  0x30
+#define        CC2520_TXCTRL                   0x31
+#define        CC2520_FSMSTAT0                 0x32
+#define        CC2520_FSMSTAT1                 0x33
+#define        CC2520_FIFOPCTRL                0x34
+#define        CC2520_FSMCTRL                  0x35
+#define        CC2520_CCACTRL0                 0x36
+#define        CC2520_CCACTRL1                 0x37
+#define        CC2520_RSSI                     0x38
+#define        CC2520_RSSISTAT                 0x39
+#define        CC2520_RXFIRST                  0x3C
+#define        CC2520_RXFIFOCNT                0x3E
+#define        CC2520_TXFIFOCNT                0x3F
+
+/* Driver private information */
+struct cc2520_private {
+       struct spi_device *spi;         /* SPI device structure */
+       struct ieee802154_dev *dev;     /* IEEE-802.15.4 device */
+       u8 *buf;                        /* SPI TX/Rx data buffer */
+       struct mutex buffer_mutex;      /* SPI buffer mutex */
+       bool is_tx;                     /* Flag for sync b/w Tx and Rx */
+       int fifo_pin;                   /* FIFO GPIO pin number */
+       struct work_struct fifop_irqwork;/* Workqueue for FIFOP */
+       spinlock_t lock;                /* Lock for is_tx */
+       struct completion tx_complete;  /* Work completion for Tx */
+};
+
+/* Generic Functions */
+static int
+cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
+{
+       int ret;
+       u8 status = 0xff;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer.len++] = cmd;
+       dev_vdbg(&priv->spi->dev,
+                "command strobe buf[0] = %02x\n",
+                priv->buf[0]);
+
+       ret = spi_sync(priv->spi, &msg);
+       if (!ret)
+               status = priv->buf[0];
+       dev_vdbg(&priv->spi->dev,
+                "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return ret;
+}
+
+static int
+cc2520_get_status(struct cc2520_private *priv, u8 *status)
+{
+       int ret;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer.len++] = CC2520_CMD_SNOP;
+       dev_vdbg(&priv->spi->dev,
+                "get status command buf[0] = %02x\n", priv->buf[0]);
+
+       ret = spi_sync(priv->spi, &msg);
+       if (!ret)
+               *status = priv->buf[0];
+       dev_vdbg(&priv->spi->dev,
+                "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return ret;
+}
+
+static int
+cc2520_write_register(struct cc2520_private *priv, u8 reg, u8 value)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+
+       if (reg <= CC2520_FREG_MASK) {
+               priv->buf[xfer.len++] = CC2520_CMD_REGISTER_WRITE | reg;
+               priv->buf[xfer.len++] = value;
+       } else {
+               priv->buf[xfer.len++] = CC2520_CMD_MEMORY_WRITE;
+               priv->buf[xfer.len++] = reg;
+               priv->buf[xfer.len++] = value;
+       }
+       status = spi_sync(priv->spi, &msg);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int
+cc2520_write_ram(struct cc2520_private *priv, u16 reg, u8 len, u8 *data)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer_head = {
+               .len        = 0,
+               .tx_buf        = priv->buf,
+               .rx_buf        = priv->buf,
+       };
+
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .tx_buf = data,
+       };
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = (CC2520_CMD_MEMORY_WRITE |
+                                               ((reg >> 8) & 0xff));
+       priv->buf[xfer_head.len++] = reg & 0xff;
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_dbg(&priv->spi->dev, "spi status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+       return status;
+}
+
+static int
+cc2520_read_register(struct cc2520_private *priv, u8 reg, u8 *data)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer1 = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       struct spi_transfer xfer2 = {
+               .len = 1,
+               .rx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer1, &msg);
+       spi_message_add_tail(&xfer2, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer1.len++] = CC2520_CMD_MEMORY_READ;
+       priv->buf[xfer1.len++] = reg;
+
+       status = spi_sync(priv->spi, &msg);
+       dev_dbg(&priv->spi->dev,
+               "spi status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int
+cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len)
+{
+       int status;
+
+       /* length byte must include FCS even
+        * if it is calculated in the hardware
+        */
+       u8 len_byte = len + 2;  /* u8: sent as one byte on SPI; int is endian-unsafe */
+
+       struct spi_message msg;
+
+       struct spi_transfer xfer_head = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+       struct spi_transfer xfer_len = {
+               .len = 1,
+               .tx_buf = &len_byte,
+       };
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .tx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_len, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = CC2520_CMD_TXBUF;
+       dev_vdbg(&priv->spi->dev,
+                "TX_FIFO cmd buf[0] = %02x\n", priv->buf[0]);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       dev_vdbg(&priv->spi->dev, "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int
+cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi)
+{
+       int status;
+       struct spi_message msg;
+
+       struct spi_transfer xfer_head = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .rx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = CC2520_CMD_RXBUF;
+
+       dev_vdbg(&priv->spi->dev, "read rxfifo buf[0] = %02x\n", priv->buf[0]);
+       dev_vdbg(&priv->spi->dev, "buf[1] = %02x\n", priv->buf[1]);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       dev_vdbg(&priv->spi->dev,
+                "return status buf[0] = %02x\n", priv->buf[0]);
+       dev_vdbg(&priv->spi->dev, "length buf[1] = %02x\n", priv->buf[1]);
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int cc2520_start(struct ieee802154_dev *dev)
+{
+       return cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRXON);
+}
+
+static void cc2520_stop(struct ieee802154_dev *dev)
+{
+       cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRFOFF);
+}
+
+static int
+cc2520_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct cc2520_private *priv = dev->priv;
+       unsigned long flags;
+       int rc;
+       u8 status = 0;
+
+       rc = cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX);
+       if (rc)
+               goto err_tx;
+
+       rc = cc2520_write_txfifo(priv, skb->data, skb->len);
+       if (rc)
+               goto err_tx;
+
+       rc = cc2520_get_status(priv, &status);
+       if (rc)
+               goto err_tx;
+
+       if (status & CC2520_STATUS_TX_UNDERFLOW) {
+               dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
+               goto err_tx;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+       BUG_ON(priv->is_tx);
+       priv->is_tx = 1;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       rc = cc2520_cmd_strobe(priv, CC2520_CMD_STXONCCA);
+       if (rc)
+               goto err;
+
+       rc = wait_for_completion_interruptible(&priv->tx_complete);
+       if (rc < 0)
+               goto err;
+
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX);
+       cc2520_cmd_strobe(priv, CC2520_CMD_SRXON);
+
+       return rc;
+err:
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->is_tx = 0;
+       spin_unlock_irqrestore(&priv->lock, flags);
+err_tx:
+       return rc;
+}
+
+
+static int cc2520_rx(struct cc2520_private *priv)
+{
+       u8 len = 0, lqi = 0, bytes = 1;
+       struct sk_buff *skb;
+
+       cc2520_read_rxfifo(priv, &len, bytes, &lqi);
+
+       if (len < 2 || len > IEEE802154_MTU)
+               return -EINVAL;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       if (cc2520_read_rxfifo(priv, skb_put(skb, len), len, &lqi)) {
+               dev_dbg(&priv->spi->dev, "frame reception failed\n");
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       skb_trim(skb, skb->len - 2);
+
+       ieee802154_rx_irqsafe(priv->dev, skb, lqi);
+
+       dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi);
+
+       return 0;
+}
+
+static int
+cc2520_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       struct cc2520_private *priv = dev->priv;
+       u8 status = 0xff;
+       u8 rssi;
+       int ret;
+
+       ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status);
+       if (ret)
+               return ret;
+
+       if (status != RSSI_VALID)
+               return -EINVAL;
+
+       ret = cc2520_read_register(priv , CC2520_RSSI, &rssi);
+       if (ret)
+               return ret;
+
+       /* level = RSSI(rssi) - RSSI_OFFSET [dBm] : offset is 78 dBm */
+       *level = rssi - RSSI_OFFSET;
+
+       return 0;
+}
+
+static int
+cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+       struct cc2520_private *priv = dev->priv;
+       int ret;
+
+       might_sleep();
+       dev_dbg(&priv->spi->dev, "trying to set channel\n");
+
+       BUG_ON(page != 0);
+       BUG_ON(channel < CC2520_MINCHANNEL);
+       BUG_ON(channel > CC2520_MAXCHANNEL);
+
+       ret = cc2520_write_register(priv, CC2520_FREQCTRL,
+                                   11 + 5*(channel - 11));
+
+       return ret;
+}
+
+static int
+cc2520_filter(struct ieee802154_dev *dev,
+             struct ieee802154_hw_addr_filt *filt, unsigned long changed)
+{
+       struct cc2520_private *priv = dev->priv;
+
+       if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+               u16 panid = le16_to_cpu(filt->pan_id);
+
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for pan id\n");
+               cc2520_write_ram(priv, CC2520RAM_PANID,
+                                sizeof(panid), (u8 *)&panid);
+       }
+
+       if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for IEEE addr\n");
+               cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
+                                sizeof(filt->ieee_addr),
+                                (u8 *)&filt->ieee_addr);
+       }
+
+       if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+               u16 addr = le16_to_cpu(filt->short_addr);
+
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for saddr\n");
+               cc2520_write_ram(priv, CC2520RAM_SHORTADDR,
+                                sizeof(addr), (u8 *)&addr);
+       }
+
+       if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for panc change\n");
+               if (filt->pan_coord)
+                       cc2520_write_register(priv, CC2520_FRMFILT0, 0x02);
+               else
+                       cc2520_write_register(priv, CC2520_FRMFILT0, 0x00);
+       }
+
+       return 0;
+}
+
+static struct ieee802154_ops cc2520_ops = {
+       .owner = THIS_MODULE,
+       .start = cc2520_start,
+       .stop = cc2520_stop,
+       .xmit = cc2520_tx,
+       .ed = cc2520_ed,
+       .set_channel = cc2520_set_channel,
+       .set_hw_addr_filt = cc2520_filter,
+};
+
+static int cc2520_register(struct cc2520_private *priv)
+{
+       int ret = -ENOMEM;
+
+       priv->dev = ieee802154_alloc_device(sizeof(*priv), &cc2520_ops);
+       if (!priv->dev)
+               goto err_ret;
+
+       priv->dev->priv = priv;
+       priv->dev->parent = &priv->spi->dev;
+       priv->dev->extra_tx_headroom = 0;
+
+       /* We support only 2.4 GHz */
+       priv->dev->phy->channels_supported[0] = 0x7FFF800;
+       priv->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+       dev_vdbg(&priv->spi->dev, "registered cc2520\n");
+       ret = ieee802154_register_device(priv->dev);
+       if (ret)
+               goto err_free_device;
+
+       return 0;
+
+err_free_device:
+       ieee802154_free_device(priv->dev);
+err_ret:
+       return ret;
+}
+
+static void cc2520_fifop_irqwork(struct work_struct *work)
+{
+       struct cc2520_private *priv
+               = container_of(work, struct cc2520_private, fifop_irqwork);
+
+       dev_dbg(&priv->spi->dev, "fifop interrupt received\n");
+
+       if (gpio_get_value(priv->fifo_pin))
+               cc2520_rx(priv);
+       else
+               dev_dbg(&priv->spi->dev, "rxfifo overflow\n");
+
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX);
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX);
+}
+
+static irqreturn_t cc2520_fifop_isr(int irq, void *data)
+{
+       struct cc2520_private *priv = data;
+
+       schedule_work(&priv->fifop_irqwork);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t cc2520_sfd_isr(int irq, void *data)
+{
+       struct cc2520_private *priv = data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (priv->is_tx) {
+               priv->is_tx = 0;
+               spin_unlock_irqrestore(&priv->lock, flags);
+               dev_dbg(&priv->spi->dev, "SFD for TX\n");
+               complete(&priv->tx_complete);
+       } else {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               dev_dbg(&priv->spi->dev, "SFD for RX\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int cc2520_hw_init(struct cc2520_private *priv)
+{
+       u8 status = 0, state = 0xff;
+       int ret;
+       int timeout = 100;
+
+       ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
+       if (ret)
+               goto err_ret;
+
+       if (state != STATE_IDLE)
+               return -EINVAL;
+
+       do {
+               ret = cc2520_get_status(priv, &status);
+               if (ret)
+                       goto err_ret;
+
+               if (timeout-- <= 0) {
+                       dev_err(&priv->spi->dev, "oscillator start failed!\n");
+                       return -ETIMEDOUT; /* ret is 0 here; don't report success */
+               }
+               udelay(1);
+       } while (!(status & CC2520_STATUS_XOSC32M_STABLE));
+
+       dev_vdbg(&priv->spi->dev, "oscillator brought up\n");
+
+       /* Registers default value: section 28.1 in Datasheet */
+       ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_MDMCTRL0, 0x85);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_MDMCTRL1, 0x14);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_RXCTRL, 0x3f);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FSCTRL, 0x5a);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FSCAL1, 0x2b);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST1, 0x0e);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST2, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMCTRL0, 0x60);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMCTRL1, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMFILT0, 0x00);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FIFOPCTRL, 127);
+       if (ret)
+               goto err_ret;
+
+       return 0;
+
+err_ret:
+       return ret;
+}
+
+static struct cc2520_platform_data *
+cc2520_get_platform_data(struct spi_device *spi)
+{
+       struct cc2520_platform_data *pdata;
+       struct device_node *np = spi->dev.of_node;
+       struct cc2520_private *priv = spi_get_drvdata(spi);
+
+       if (!np)
+               return spi->dev.platform_data;
+
+       pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               goto done;
+
+       pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
+       priv->fifo_pin = pdata->fifo;
+
+       pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
+
+       pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
+       pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
+       pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
+       pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
+
+       spi->dev.platform_data = pdata;
+
+done:
+       return pdata;
+}
+
+static int cc2520_probe(struct spi_device *spi)
+{
+       struct cc2520_private *priv;
+       struct pinctrl *pinctrl;
+       struct cc2520_platform_data *pdata;
+       int ret;
+
+       priv = devm_kzalloc(&spi->dev,
+                           sizeof(struct cc2520_private), GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto err_ret;
+       }
+
+       spi_set_drvdata(spi, priv);
+
+       pinctrl = devm_pinctrl_get_select_default(&spi->dev);
+       if (IS_ERR(pinctrl))
+               dev_warn(&spi->dev,
+                        "pinctrl pins are not configured");
+
+       pdata = cc2520_get_platform_data(spi);
+       if (!pdata) {
+               dev_err(&spi->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       priv->spi = spi;
+
+       priv->buf = devm_kzalloc(&spi->dev,
+                                SPI_COMMAND_BUFFER, GFP_KERNEL);
+       if (!priv->buf) {
+               ret = -ENOMEM;
+               goto err_ret;
+       }
+
+       mutex_init(&priv->buffer_mutex);
+       INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
+       spin_lock_init(&priv->lock);
+       init_completion(&priv->tx_complete);
+
+       /* Request all the gpio's */
+       if (!gpio_is_valid(pdata->fifo)) {
+               dev_err(&spi->dev, "fifo gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->fifo,
+                                   GPIOF_IN, "fifo");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->cca)) {
+               dev_err(&spi->dev, "cca gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->cca,
+                                   GPIOF_IN, "cca");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->fifop)) {
+               dev_err(&spi->dev, "fifop gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->fifop,
+                                   GPIOF_IN, "fifop");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->sfd)) {
+               dev_err(&spi->dev, "sfd gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->sfd,
+                                   GPIOF_IN, "sfd");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->reset)) {
+               dev_err(&spi->dev, "reset gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->reset,
+                                   GPIOF_OUT_INIT_LOW, "reset");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->vreg)) {
+               dev_err(&spi->dev, "vreg gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->vreg,
+                                   GPIOF_OUT_INIT_LOW, "vreg");
+       if (ret)
+               goto err_hw_init;
+
+
+       gpio_set_value(pdata->vreg, HIGH);
+       usleep_range(100, 150);
+
+       gpio_set_value(pdata->reset, HIGH);
+       usleep_range(200, 250);
+
+       ret = cc2520_hw_init(priv);
+       if (ret)
+               goto err_hw_init;
+
+       /* Set up fifop interrupt */
+       ret = devm_request_irq(&spi->dev,
+                              gpio_to_irq(pdata->fifop),
+                              cc2520_fifop_isr,
+                              IRQF_TRIGGER_RISING,
+                              dev_name(&spi->dev),
+                              priv);
+       if (ret) {
+               dev_err(&spi->dev, "could not get fifop irq\n");
+               goto err_hw_init;
+       }
+
+       /* Set up sfd interrupt */
+       ret = devm_request_irq(&spi->dev,
+                              gpio_to_irq(pdata->sfd),
+                              cc2520_sfd_isr,
+                              IRQF_TRIGGER_FALLING,
+                              dev_name(&spi->dev),
+                              priv);
+       if (ret) {
+               dev_err(&spi->dev, "could not get sfd irq\n");
+               goto err_hw_init;
+       }
+
+       ret = cc2520_register(priv);
+       if (ret)
+               goto err_hw_init;
+
+       return 0;
+
+err_hw_init:
+       mutex_destroy(&priv->buffer_mutex);
+       flush_work(&priv->fifop_irqwork);
+
+err_ret:
+       return ret;
+}
+
+static int cc2520_remove(struct spi_device *spi)
+{
+       struct cc2520_private *priv = spi_get_drvdata(spi);
+
+       mutex_destroy(&priv->buffer_mutex);
+       flush_work(&priv->fifop_irqwork);
+
+       ieee802154_unregister_device(priv->dev);
+       ieee802154_free_device(priv->dev);
+
+       return 0;
+}
+
+static const struct spi_device_id cc2520_ids[] = {
+       {"cc2520", },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, cc2520_ids);
+
+static const struct of_device_id cc2520_of_ids[] = {
+       {.compatible = "ti,cc2520", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cc2520_of_ids);
+
+/* SPI driver structure */
+static struct spi_driver cc2520_driver = {
+       .driver = {
+               .name = "cc2520",
+               .bus = &spi_bus_type,
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(cc2520_of_ids),
+       },
+       .id_table = cc2520_ids,
+       .probe = cc2520_probe,
+       .remove = cc2520_remove,
+};
+module_spi_driver(cc2520_driver);
+
+MODULE_AUTHOR("Varka Bhadram <varkab@cdac.in>");
+MODULE_DESCRIPTION("CC2520 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
index 78f18be3bbf2a76b7c245abef4aabd34faf3c72f..9ce854f43917ad723da924e7a99e3629c6a6e9bd 100644 (file)
@@ -343,7 +343,8 @@ static int ieee802154fake_probe(struct platform_device *pdev)
        if (!phy)
                return -ENOMEM;
 
-       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
+       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d",
+                          NET_NAME_UNKNOWN, ieee802154_fake_setup);
        if (!dev) {
                wpan_phy_free(phy);
                return -ENOMEM;
index 4048062011ba29bdafad313f06ef23aaf7be01c8..9e6a124b13f2c760d9c998d9bfbfde13c627c4dd 100644 (file)
@@ -610,10 +610,95 @@ out:
        return IRQ_HANDLED;
 }
 
+static int mrf24j40_hw_init(struct mrf24j40 *devrec)
+{
+       int ret;
+       u8 val;
+
+       /* Initialize the device.
+               From datasheet section 3.2: Initialization. */
+       ret = write_short_reg(devrec, REG_SOFTRST, 0x07);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_PACON2, 0x98);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_TXSTBL, 0x95);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON0, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON1, 0x01);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON2, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON6, 0x90);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON7, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON8, 0x10);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_SLPCON1, 0x21);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_BBREG2, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_CCAEDTH, 0x60);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_BBREG6, 0x40);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_RFCTL, 0x04);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_RFCTL, 0x0);
+       if (ret)
+               goto err_ret;
+
+       udelay(192);
+
+       /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
+       ret = read_short_reg(devrec, REG_RXMCR, &val);
+       if (ret)
+               goto err_ret;
+
+       val &= ~0x3; /* Clear RX mode (normal) */
+
+       ret = write_short_reg(devrec, REG_RXMCR, val);
+       if (ret)
+               goto err_ret;
+
+       return 0;
+
+err_ret:
+       return ret;
+}
+
 static int mrf24j40_probe(struct spi_device *spi)
 {
        int ret = -ENOMEM;
-       u8 val;
        struct mrf24j40 *devrec;
 
        printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
@@ -650,31 +735,9 @@ static int mrf24j40_probe(struct spi_device *spi)
        if (ret)
                goto err_register_device;
 
-       /* Initialize the device.
-               From datasheet section 3.2: Initialization. */
-       write_short_reg(devrec, REG_SOFTRST, 0x07);
-       write_short_reg(devrec, REG_PACON2, 0x98);
-       write_short_reg(devrec, REG_TXSTBL, 0x95);
-       write_long_reg(devrec, REG_RFCON0, 0x03);
-       write_long_reg(devrec, REG_RFCON1, 0x01);
-       write_long_reg(devrec, REG_RFCON2, 0x80);
-       write_long_reg(devrec, REG_RFCON6, 0x90);
-       write_long_reg(devrec, REG_RFCON7, 0x80);
-       write_long_reg(devrec, REG_RFCON8, 0x10);
-       write_long_reg(devrec, REG_SLPCON1, 0x21);
-       write_short_reg(devrec, REG_BBREG2, 0x80);
-       write_short_reg(devrec, REG_CCAEDTH, 0x60);
-       write_short_reg(devrec, REG_BBREG6, 0x40);
-       write_short_reg(devrec, REG_RFCTL, 0x04);
-       write_short_reg(devrec, REG_RFCTL, 0x0);
-       udelay(192);
-
-       /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
-       ret = read_short_reg(devrec, REG_RXMCR, &val);
+       ret = mrf24j40_hw_init(devrec);
        if (ret)
-               goto err_read_reg;
-       val &= ~0x3; /* Clear RX mode (normal) */
-       write_short_reg(devrec, REG_RXMCR, val);
+               goto err_hw_init;
 
        ret = devm_request_threaded_irq(&spi->dev,
                                        spi->irq,
@@ -692,7 +755,7 @@ static int mrf24j40_probe(struct spi_device *spi)
        return 0;
 
 err_irq:
-err_read_reg:
+err_hw_init:
        ieee802154_unregister_device(devrec->dev);
 err_register_device:
        ieee802154_free_device(devrec->dev);
index 46a7790be004a7653d391ce96d51ae48975baa8c..d2d4a3d2237f7c735a326a949ec55561fe0c79f7 100644 (file)
@@ -269,8 +269,8 @@ static int __init ifb_init_one(int index)
        struct ifb_private *dp;
        int err;
 
-       dev_ifb = alloc_netdev(sizeof(struct ifb_private),
-                                "ifb%d", ifb_setup);
+       dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+                              NET_NAME_UNKNOWN, ifb_setup);
 
        if (!dev_ifb)
                return -ENOMEM;
index 96fe3659012de5a6682658543a91cc025501c4e0..e638893e98a9784173867899d0c3a695e2df53da 100644 (file)
@@ -553,8 +553,8 @@ static int kingsun_probe(struct usb_interface *intf,
        return 0;
 
 free_mem:
-       if (kingsun->out_buf) kfree(kingsun->out_buf);
-       if (kingsun->in_buf) kfree(kingsun->in_buf);
+       kfree(kingsun->out_buf);
+       kfree(kingsun->in_buf);
        free_netdev(net);
 err_out1:
        return ret;
index bb96409f8c056b85b77255f1c397edb4b28b0511..8f2262540561caf546ea53740a20795b10f51205 100644 (file)
@@ -195,7 +195,7 @@ static __net_init int loopback_net_init(struct net *net)
        int err;
 
        err = -ENOMEM;
-       dev = alloc_netdev(0, "lo", loopback_setup);
+       dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
        if (!dev)
                goto out;
 
index 9408157a246c8e20cc9de5ec018bdbfc42d7cf34..255c21ff274cc120040fef0dd3d336cec16d831c 100644 (file)
@@ -40,6 +40,7 @@
 #define LAYER2         0x01
 #define MAX_RXTS       64
 #define N_EXT_TS       6
+#define N_PER_OUT      7
 #define PSF_PTPVER     2
 #define PSF_EVNT       0x4000
 #define PSF_RX         0x2000
@@ -47,7 +48,6 @@
 #define EXT_EVENT      1
 #define CAL_EVENT      7
 #define CAL_TRIGGER    7
-#define PER_TRIGGER    6
 #define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
@@ -300,23 +300,23 @@ static u64 phy2txts(struct phy_txts *p)
 }
 
 static int periodic_output(struct dp83640_clock *clock,
-                          struct ptp_clock_request *clkreq, bool on)
+                          struct ptp_clock_request *clkreq, bool on,
+                          int trigger)
 {
        struct dp83640_private *dp83640 = clock->chosen;
        struct phy_device *phydev = dp83640->phydev;
        u32 sec, nsec, pwidth;
-       u16 gpio, ptp_trig, trigger, val;
+       u16 gpio, ptp_trig, val;
 
        if (on) {
-               gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
+               gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
+                                       trigger);
                if (gpio < 1)
                        return -EINVAL;
        } else {
                gpio = 0;
        }
 
-       trigger = PER_TRIGGER;
-
        ptp_trig = TRIG_WR |
                (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
                (gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
@@ -353,6 +353,11 @@ static int periodic_output(struct dp83640_clock *clock,
        ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
        ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
+       /* Triggers 0 and 1 has programmable pulsewidth2 */
+       if (trigger < 2) {
+               ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff);
+               ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);
+       }
 
        /*enable trigger*/
        val &= ~TRIG_LOAD;
@@ -491,9 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
-               if (rq->perout.index != 0)
+               if (rq->perout.index >= N_PER_OUT)
                        return -EINVAL;
-               return periodic_output(clock, rq, on);
+               return periodic_output(clock, rq, on, rq->perout.index);
 
        default:
                break;
@@ -505,6 +510,16 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
                              enum ptp_pin_function func, unsigned int chan)
 {
+       struct dp83640_clock *clock =
+               container_of(ptp, struct dp83640_clock, caps);
+
+       if (clock->caps.pin_config[pin].func == PTP_PF_PHYSYNC &&
+           !list_empty(&clock->phylist))
+               return 1;
+
+       if (func == PTP_PF_PHYSYNC)
+               return 1;
+
        return 0;
 }
 
@@ -594,7 +609,11 @@ static void recalibrate(struct dp83640_clock *clock)
        u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
 
        trigger = CAL_TRIGGER;
-       cal_gpio = gpio_tab[CALIBRATE_GPIO];
+       cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
+       if (cal_gpio < 1) {
+               pr_err("PHY calibration pin not avaible - PHY is not calibrated.");
+               return;
+       }
 
        mutex_lock(&clock->extreg_lock);
 
@@ -736,6 +755,9 @@ static int decode_evnt(struct dp83640_private *dp83640,
        event.type = PTP_CLOCK_EXTTS;
        event.timestamp = phy2txts(&dp83640->edata);
 
+       /* Compensate for input path and synchronization delays */
+       event.timestamp -= 35;
+
        for (i = 0; i < N_EXT_TS; i++) {
                if (ext_status & exts_chan_to_edata(i)) {
                        event.index = i;
@@ -837,20 +859,18 @@ static int is_sync(struct sk_buff *skb, int type)
        u8 *data = skb->data, *msgtype;
        unsigned int offset = 0;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
@@ -870,25 +890,23 @@ static int is_sync(struct sk_buff *skb, int type)
 static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 {
        u16 *seqid;
-       unsigned int offset;
+       unsigned int offset = 0;
        u8 *msgtype, *data = skb_mac_header(skb);
 
        /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
@@ -944,7 +962,7 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
        clock->caps.max_adj     = 1953124;
        clock->caps.n_alarm     = 0;
        clock->caps.n_ext_ts    = N_EXT_TS;
-       clock->caps.n_per_out   = 1;
+       clock->caps.n_per_out   = N_PER_OUT;
        clock->caps.n_pins      = DP83640_N_PINS;
        clock->caps.pps         = 0;
        clock->caps.adjfreq     = ptp_dp83640_adjfreq;
index f7c61812ea4aa20fe0141b60b912805cd2ea9eda..e56e269a6eb3a0949954cbe685c26aa203bf9e38 100644 (file)
@@ -138,10 +138,30 @@ struct phy_setting {
 /* A mapping of all SUPPORTED settings to speed/duplex */
 static const struct phy_setting settings[] = {
        {
-               .speed = 10000,
+               .speed = SPEED_10000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_10000baseKR_Full,
+       },
+       {
+               .speed = SPEED_10000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_10000baseKX4_Full,
+       },
+       {
+               .speed = SPEED_10000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10000baseT_Full,
        },
+       {
+               .speed = SPEED_2500,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_2500baseX_Full,
+       },
+       {
+               .speed = SPEED_1000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_1000baseKX_Full,
+       },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_FULL,
index 35d753d22f78b91d643548029df5b2e6eea64d49..4f4568ef124e4662350c27196092585d46915501 100644 (file)
@@ -696,6 +696,7 @@ int phy_suspend(struct phy_device *phydev)
                return phydrv->suspend(phydev);
        return 0;
 }
+EXPORT_SYMBOL(phy_suspend);
 
 int phy_resume(struct phy_device *phydev)
 {
@@ -705,6 +706,7 @@ int phy_resume(struct phy_device *phydev)
                return phydrv->resume(phydev);
        return 0;
 }
+EXPORT_SYMBOL(phy_resume);
 
 /* Generic PHY support and helper functions */
 
index d5b77ef3a2100c3ce42ad75f4e1c9fe981f046e9..765248b42a0aac6a42ad9ad0faf1c890f0f2ea1b 100644 (file)
@@ -655,6 +655,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        break;
                ppp_lock(ppp);
                cflags = ppp->flags & ~val;
+#ifdef CONFIG_PPP_MULTILINK
+               if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
+                       ppp->nextseq = 0;
+#endif
                ppp->flags = val & SC_FLAG_BITS;
                ppp_unlock(ppp);
                if (cflags & SC_CCP_OPEN)
@@ -2669,7 +2673,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        int ret = -ENOMEM;
        int i;
 
-       dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
+       dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_UNKNOWN,
+                          ppp_setup);
        if (!dev)
                goto out1;
 
index 1252d9c726a77c4692e12ca095eff80d2a4097ad..079f7adfcde5ef0fe7bf4637712ef3ac738c2f1e 100644 (file)
@@ -396,7 +396,6 @@ found:
                   ntohs(cs->cs_ip.tot_len) == hlen)
                        break;
                goto uncompressed;
-               break;
        case SPECIAL_I:
        case SPECIAL_D:
                /* actual changes match one of our special case encodings --
index 87526443841f6863c16bc59a01c1361073743cd8..05387b1e2e95e53b1b3abaef034104ea2b26b163 100644 (file)
@@ -749,7 +749,7 @@ static struct slip *sl_alloc(dev_t line)
                return NULL;
 
        sprintf(name, "sl%d", i);
-       dev = alloc_netdev(sizeof(*sl), name, sl_setup);
+       dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
        if (!dev)
                return NULL;
 
index 98bad1fb1bfb1ce66ea4219c2e767256f05d6cbb..acaaf6784179b04bf227de6fefb770f3e744a231 100644 (file)
@@ -1633,7 +1633,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        name = ifr->ifr_name;
 
                dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-                                      tun_setup, queues, queues);
+                                      NET_NAME_UNKNOWN, tun_setup, queues,
+                                      queues);
 
                if (!dev)
                        return -ENOMEM;
index 6358d420e185b4d4cb7b64a0c2a459fc5b70ed7f..2ec1500d0077529c0fd32d74c1abeb9d5f373aa8 100644 (file)
@@ -387,7 +387,7 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
                return -EINVAL;
 
        dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
-                               ifname, usbpn_setup);
+                          ifname, NET_NAME_UNKNOWN, usbpn_setup);
        if (!dev)
                return -ENOMEM;
 
index a4272ed62da865170cd9de7622e7c48875ed9f5c..a36401802cec304ab2c00f0e0ea38d22c0cc0054 100644 (file)
@@ -2504,7 +2504,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 
        /* allocate our network device, then we can put in our private data */
        /* call hso_net_init to do the basic initialization */
-       net = alloc_netdev(sizeof(struct hso_net), "hso%d", hso_net_init);
+       net = alloc_netdev(sizeof(struct hso_net), "hso%d", NET_NAME_UNKNOWN,
+                          hso_net_init);
        if (!net) {
                dev_err(&interface->dev, "Unable to create ethernet device\n");
                goto exit;
index 7bad2d316637ab1ac8f7d83d197facbaf3e4495e..e1e430587868560e67fc03b3f4a4531586ca4d51 100644 (file)
@@ -59,6 +59,7 @@
 #define PLA_WDT6_CTRL          0xe428
 #define PLA_TCR0               0xe610
 #define PLA_TCR1               0xe612
+#define PLA_MTPS               0xe615
 #define PLA_TXFIFO_CTRL                0xe618
 #define PLA_RSTTALLY           0xe800
 #define PLA_CR                 0xe813
 /* PLA_TCR1 */
 #define VERSION_MASK           0x7cf0
 
+/* PLA_MTPS */
+#define MTPS_JUMBO             (12 * 1024 / 64)
+#define MTPS_DEFAULT           (6 * 1024 / 64)
+
 /* PLA_RSTTALLY */
 #define TALLY_RESET            0x0001
 
@@ -440,8 +445,11 @@ enum rtl_register_content {
 #define BYTE_EN_START_MASK     0x0f
 #define BYTE_EN_END_MASK       0xf0
 
+#define RTL8153_MAX_PACKET     9216 /* 9K */
+#define RTL8153_MAX_MTU                (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - VLAN_HLEN)
 #define RTL8152_RMS            (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
-#define RTL8152_TX_TIMEOUT     (HZ)
+#define RTL8153_RMS            RTL8153_MAX_PACKET
+#define RTL8152_TX_TIMEOUT     (5 * HZ)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -2522,7 +2530,8 @@ static void r8153_first_init(struct r8152 *tp)
        ocp_data &= ~CPCR_RX_VLAN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
        ocp_data |= TCR0_AUTO_FIFO;
@@ -2572,7 +2581,7 @@ static void r8153_enter_oob(struct r8152 *tp)
                mdelay(1);
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
        ocp_data &= ~TEREDO_WAKE_MASK;
@@ -3289,6 +3298,26 @@ out:
        return res;
 }
 
+static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct r8152 *tp = netdev_priv(dev);
+
+       switch (tp->version) {
+       case RTL_VER_01:
+       case RTL_VER_02:
+               return eth_change_mtu(dev, new_mtu);
+       default:
+               break;
+       }
+
+       if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+
+       return 0;
+}
+
 static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_open               = rtl8152_open,
        .ndo_stop               = rtl8152_close,
@@ -3297,8 +3326,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_tx_timeout         = rtl8152_tx_timeout,
        .ndo_set_rx_mode        = rtl8152_set_rx_mode,
        .ndo_set_mac_address    = rtl8152_set_mac_address,
-
-       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_change_mtu         = rtl8152_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
 };
 
index b4a10bcb66a0f62be1606fa34629d120913fc74d..8ad596573d1783d512ba4b40e2044909850e7f52 100644 (file)
@@ -248,6 +248,21 @@ static void veth_dev_free(struct net_device *dev)
        free_netdev(dev);
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void veth_poll_controller(struct net_device *dev)
+{
+       /* veth only receives frames when its peer sends one
+        * Since it's a synchronous operation, we are guaranteed
+        * never to have pending data when we poll for it so
+        * there is nothing to do here.
+        *
+        * We need this though so netpoll recognizes us as an interface that
+        * supports polling, which enables bridge devices in virt setups to
+        * still use netconsole
+        */
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
@@ -257,6 +272,9 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = veth_poll_controller,
+#endif
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
@@ -317,6 +335,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
+       unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;
 
@@ -344,16 +363,20 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
                tbp = tb;
        }
 
-       if (tbp[IFLA_IFNAME])
+       if (tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
-       else
+               name_assign_type = NET_NAME_USER;
+       } else {
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
+               name_assign_type = NET_NAME_ENUM;
+       }
 
        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);
 
-       peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp);
+       peer = rtnl_create_link(net, ifname, name_assign_type,
+                               &veth_link_ops, tbp);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
index ade33ef82823b230a34890d77af039d8531aefb7..d3f3e5d21874672d18d84bfdc71043c6480d5bbd 100644 (file)
@@ -33,6 +33,7 @@
 #include <net/ip_tunnels.h>
 #include <net/icmp.h>
 #include <net/udp.h>
+#include <net/udp_tunnel.h>
 #include <net/rtnetlink.h>
 #include <net/route.h>
 #include <net/dsfield.h>
@@ -933,7 +934,8 @@ out:
 
 /* Dump forwarding table */
 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-                         struct net_device *dev, int idx)
+                         struct net_device *dev,
+                         struct net_device *filter_dev, int idx)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
@@ -1570,25 +1572,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
        return false;
 }
 
-/* Compute source port for outgoing packet
- *   first choice to use L4 flow hash since it will spread
- *     better and maybe available from hardware
- *   secondary choice is to use jhash on the Ethernet header
- */
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
-{
-       unsigned int range = (port_max - port_min) + 1;
-       u32 hash;
-
-       hash = skb_get_hash(skb);
-       if (!hash)
-               hash = jhash(skb->data, 2 * ETH_ALEN,
-                            (__force u32) skb->protocol);
-
-       return htons((((u64) hash * range) >> 32) + port_min);
-}
-EXPORT_SYMBOL_GPL(vxlan_src_port);
-
 static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
                                                    bool udp_csum)
 {
@@ -1807,7 +1790,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
+       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
+                                    vxlan->port_max, true);
 
        if (dst->sa.sa_family == AF_INET) {
                memset(&fl4, 0, sizeof(fl4));
@@ -2235,7 +2219,6 @@ static void vxlan_setup(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
-       int low, high;
 
        eth_hw_addr_random(dev);
        ether_setup(dev);
@@ -2272,9 +2255,6 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(dev_net(dev), &low, &high);
-       vxlan->port_min = low;
-       vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
 
        vxlan->dev = dev;
@@ -2360,102 +2340,37 @@ static void vxlan_del_work(struct work_struct *work)
        kfree_rcu(vs, rcu);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-/* Create UDP socket for encapsulation receive. AF_INET6 socket
- * could be used for both IPv4 and IPv6 communications, but
- * users may set bindv6only=1.
- */
-static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
+static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
+                                       __be16 port, u32 flags)
 {
-       struct sock *sk;
        struct socket *sock;
-       struct sockaddr_in6 vxlan_addr = {
-               .sin6_family = AF_INET6,
-               .sin6_port = port,
-       };
-       int rc, val = 1;
-
-       rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
-       if (rc < 0) {
-               pr_debug("UDPv6 socket create failed\n");
-               return ERR_PTR(rc);
-       }
-
-       /* Put in proper namespace */
-       sk = sock->sk;
-       sk_change_net(sk, net);
-
-       kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
-                         (char *)&val, sizeof(val));
-       rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
-                        sizeof(struct sockaddr_in6));
-       if (rc < 0) {
-               pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
-                        &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
-               sk_release_kernel(sk);
-               return ERR_PTR(rc);
-       }
-       /* At this point, IPv6 module should have been loaded in
-        * sock_create_kern().
-        */
-       BUG_ON(!ipv6_stub);
-
-       /* Disable multicast loopback */
-       inet_sk(sk)->mc_loop = 0;
-
-       if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
-               udp_set_no_check6_tx(sk, true);
-
-       if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
-               udp_set_no_check6_rx(sk, true);
-
-       return sock;
-}
-
-#else
-
-static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
-{
-               return ERR_PTR(-EPFNOSUPPORT);
-}
-#endif
+       struct udp_port_cfg udp_conf;
+       int err;
 
-static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
-{
-       struct sock *sk;
-       struct socket *sock;
-       struct sockaddr_in vxlan_addr = {
-               .sin_family = AF_INET,
-               .sin_addr.s_addr = htonl(INADDR_ANY),
-               .sin_port = port,
-       };
-       int rc;
+       memset(&udp_conf, 0, sizeof(udp_conf));
 
-       /* Create UDP socket for encapsulation receive. */
-       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
-       if (rc < 0) {
-               pr_debug("UDP socket create failed\n");
-               return ERR_PTR(rc);
+       if (ipv6) {
+               udp_conf.family = AF_INET6;
+               udp_conf.use_udp6_tx_checksums =
+                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+               udp_conf.use_udp6_rx_checksums =
+                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+       } else {
+               udp_conf.family = AF_INET;
+               udp_conf.local_ip.s_addr = INADDR_ANY;
+               udp_conf.use_udp_checksums =
+                   !!(flags & VXLAN_F_UDP_CSUM);
        }
 
-       /* Put in proper namespace */
-       sk = sock->sk;
-       sk_change_net(sk, net);
+       udp_conf.local_udp_port = port;
 
-       rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
-                        sizeof(vxlan_addr));
-       if (rc < 0) {
-               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
-                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
-               sk_release_kernel(sk);
-               return ERR_PTR(rc);
-       }
+       /* Open UDP socket */
+       err = udp_sock_create(net, &udp_conf, &sock);
+       if (err < 0)
+               return ERR_PTR(err);
 
        /* Disable multicast loopback */
-       inet_sk(sk)->mc_loop = 0;
-
-       if (!(flags & VXLAN_F_UDP_CSUM))
-               sock->sk->sk_no_check_tx = 1;
+       inet_sk(sock->sk)->mc_loop = 0;
 
        return sock;
 }
@@ -2481,10 +2396,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 
        INIT_WORK(&vs->del_work, vxlan_del_work);
 
-       if (ipv6)
-               sock = create_v6_sock(net, port, flags);
-       else
-               sock = create_v4_sock(net, port, flags);
+       sock = vxlan_create_sock(net, ipv6, port, flags);
        if (IS_ERR(sock)) {
                kfree(vs);
                return ERR_CAST(sock);
index 19f7cb2cdef3c133fa2d04b4b2560d0acc6c2c35..43c9960dce1c4bd89195add029eefe26d0bfd1a6 100644 (file)
@@ -255,7 +255,6 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                return -EINVAL;
 
                        return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
-                       break;
 
                default: 
                        return -EOPNOTSUPP;
@@ -327,8 +326,8 @@ static int dlci_add(struct dlci_add *dlci)
                goto err1;
 
        /* create device name */
-       master = alloc_netdev( sizeof(struct dlci_local), "dlci%d",
-                             dlci_setup);
+       master = alloc_netdev(sizeof(struct dlci_local), "dlci%d",
+                             NET_NAME_UNKNOWN, dlci_setup);
        if (!master) {
                err = -ENOMEM;
                goto err1;
index 9c33ca918e19f7c03bc66ee8aff4b5365ec031eb..51f6cee8aab2d805147b9741ea49fad3747d7cfa 100644 (file)
@@ -256,7 +256,8 @@ static void hdlc_setup(struct net_device *dev)
 struct net_device *alloc_hdlcdev(void *priv)
 {
        struct net_device *dev;
-       dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
+       dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d",
+                          NET_NAME_UNKNOWN, hdlc_setup);
        if (dev)
                dev_to_hdlc(dev)->priv = priv;
        return dev;
index 7c6cb4f31798881ab24644a33791775e0ae1daf3..7cc64eac0fa3b78b29d5cd667c143f2d9c24b79d 100644 (file)
@@ -1075,10 +1075,11 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        used = pvc_is_used(pvc);
 
        if (type == ARPHRD_ETHER) {
-               dev = alloc_netdev(0, "pvceth%d", ether_setup);
+               dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
+                                  ether_setup);
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        } else
-               dev = alloc_netdev(0, "pvc%d", pvc_setup);
+               dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
 
        if (!dev) {
                netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
index a33a46fa88dd0216871502622a0f06b976ac94d5..2f5eda8a7227f922df5c41229df8230fd8e3e404 100644 (file)
@@ -325,8 +325,8 @@ static int lapbeth_new_device(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", 
-                          lapbeth_setup);
+       ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
+                           lapbeth_setup);
        if (!ndev)
                goto out;
 
index 1b89ecf0959e11ae4136e662dba68f344337d455..758c4ba1e97c91389ad6e5d1e6cda37f0dae26fa 100644 (file)
@@ -227,7 +227,8 @@ int __init sbni_probe(int unit)
        struct net_device *dev;
        int err;
 
-       dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
+       dev = alloc_netdev(sizeof(struct net_local), "sbni",
+                          NET_NAME_UNKNOWN, sbni_devsetup);
        if (!dev)
                return -ENOMEM;
 
@@ -1477,8 +1478,8 @@ int __init init_module( void )
        int err;
 
        while( num < SBNI_MAX_NUM_CARDS ) {
-               dev = alloc_netdev(sizeof(struct net_local), 
-                                  "sbni%d", sbni_devsetup);
+               dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
+                                  NET_NAME_UNKNOWN, sbni_devsetup);
                if( !dev)
                        break;
 
index cdd45fb8a1f6892587abddf2abbf0963bd7a5653..421ac5f856994731fc7a0f3752c32dcebda15bf1 100644 (file)
@@ -1631,7 +1631,8 @@ static int __init init_sdla(void)
 
        printk("%s.\n", version);
 
-       sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla);
+       sdla = alloc_netdev(sizeof(struct frad_local), "sdla0",
+                           NET_NAME_UNKNOWN, setup_sdla);
        if (!sdla) 
                return -ENOMEM;
 
index fa9fdfa128c1e6b732c74907fb43504c8ec2ad4c..5c47b011a9d7f3000a27d4c81d3dc75e5f855129 100644 (file)
@@ -81,8 +81,8 @@ static struct x25_asy *x25_asy_alloc(void)
                char name[IFNAMSIZ];
                sprintf(name, "x25asy%d", i);
 
-               dev = alloc_netdev(sizeof(struct x25_asy),
-                                  name, x25_asy_setup);
+               dev = alloc_netdev(sizeof(struct x25_asy), name,
+                                  NET_NAME_UNKNOWN, x25_asy_setup);
                if (!dev)
                        return NULL;
 
index cd15a93d9084377ce4a8ce79d68c9b49b7fb9845..e7f5910a65191f4f013ae53db73d2a77510ae9e8 100644 (file)
@@ -472,7 +472,7 @@ int i2400mu_probe(struct usb_interface *iface,
 
        /* Allocate instance [calls i2400m_netdev_setup() on it]. */
        result = -ENOMEM;
-       net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d",
+       net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d", NET_NAME_UNKNOWN,
                               i2400mu_netdev_setup);
        if (net_dev == NULL) {
                dev_err(dev, "no memory for network device instance\n");
index 64747d457bb3a1879f9cb0b478836dea4cb3099e..b39807579a8aa631c17b64f44465937f33a2e1ce 100644 (file)
@@ -2685,7 +2685,8 @@ static struct net_device *init_wifidev(struct airo_info *ai,
                                        struct net_device *ethdev)
 {
        int err;
-       struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
+       struct net_device *dev = alloc_netdev(0, "wifi%d", NET_NAME_UNKNOWN,
+                                             wifi_setup);
        if (!dev)
                return NULL;
        dev->ml_priv = ethdev->ml_priv;
@@ -2785,7 +2786,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
        CapabilityRid cap_rid;
 
        /* Create the network device object. */
-       dev = alloc_netdev(sizeof(*ai), "", ether_setup);
+       dev = alloc_netdev(sizeof(*ai), "", NET_NAME_UNKNOWN, ether_setup);
        if (!dev) {
                airo_print_err("", "Couldn't alloc_etherdev");
                return NULL;
@@ -7817,7 +7818,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
        case AIRORRID:      ridcode = comp->ridnum;     break;
        default:
                return -EINVAL;
-               break;
        }
 
        if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
index d48776e4f343e050787510923bc01e689a2bd9cc..334c2ece855a92af6576b01ec87cd47af58b8540 100644 (file)
@@ -1955,8 +1955,9 @@ static void at76_dwork_hw_scan(struct work_struct *work)
 
 static int at76_hw_scan(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
-                       struct cfg80211_scan_request *req)
+                       struct ieee80211_scan_request *hw_req)
 {
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct at76_priv *priv = hw->priv;
        struct at76_req_scan scan;
        u8 *ssid = NULL;
index a889fd66fc63190ec7f393ae8e64a6d117281267..fd9e5305e77fd263cb55ecb46ff08e6d06ca0935 100644 (file)
@@ -63,6 +63,7 @@ enum ath_op_flags {
        ATH_OP_PRIM_STA_VIF,
        ATH_OP_HW_RESET,
        ATH_OP_SCANNING,
+       ATH_OP_MULTI_CHANNEL,
 };
 
 enum ath_bus_type {
index a21080028c54eeedff71b9b00a327620d9780dc3..b8314a534972a6fc51fac13e69c3ac26f059c2dc 100644 (file)
@@ -3137,10 +3137,11 @@ exit:
 
 static int ath10k_hw_scan(struct ieee80211_hw *hw,
                          struct ieee80211_vif *vif,
-                         struct cfg80211_scan_request *req)
+                         struct ieee80211_scan_request *hw_req)
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct wmi_start_scan_arg arg;
        int ret = 0;
        int i;
index 74bd54d6acebaeb2052d0184898a6b7e13f27a49..85316bb3f8c66a51baffad61a5b306337f4ec04f 100644 (file)
@@ -1285,6 +1285,7 @@ struct ath5k_hw {
 #define ATH_STAT_STARTED       3               /* opened & irqs enabled */
 
        unsigned int            filter_flags;   /* HW flags, AR5K_RX_FILTER_* */
+       unsigned int            fif_filter_flags; /* Current FIF_* filter flags */
        struct ieee80211_channel *curchan;      /* current h/w channel */
 
        u16                     nvifs;
index 4b18434ba697c20c048e457bcdbc1c788c41da9b..8ad2550bce7ff4820511a606a30d10f81e09b91d 100644 (file)
@@ -1382,6 +1382,9 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
        rxs->flag = 0;
        if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
                rxs->flag |= RX_FLAG_MMIC_ERROR;
+       if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
+               rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+
 
        /*
         * always extend the mac timestamp, since this information is
@@ -1449,6 +1452,8 @@ ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
        ah->stats.rx_bytes_count += rs->rs_datalen;
 
        if (unlikely(rs->rs_status)) {
+               unsigned int filters;
+
                if (rs->rs_status & AR5K_RXERR_CRC)
                        ah->stats.rxerr_crc++;
                if (rs->rs_status & AR5K_RXERR_FIFO)
@@ -1457,7 +1462,20 @@ ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
                        ah->stats.rxerr_phy++;
                        if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
                                ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
-                       return false;
+
+                       /*
+                        * Treat packets that underwent a CCK or OFDM reset as having a bad CRC.
+                        * These restarts happen when the radio resynchronizes to a stronger frame
+                        * while receiving a weaker frame. Here we receive the prefix of the weak
+                        * frame. Since these are incomplete packets, mark their CRC as invalid.
+                        */
+                       if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
+                           rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
+                               rs->rs_status |= AR5K_RXERR_CRC;
+                               rs->rs_status &= ~AR5K_RXERR_PHY;
+                       } else {
+                               return false;
+                       }
                }
                if (rs->rs_status & AR5K_RXERR_DECRYPT) {
                        /*
@@ -1480,8 +1498,15 @@ ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
                        return true;
                }
 
-               /* reject any frames with non-crypto errors */
-               if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
+               /*
+                * Reject any frames with non-crypto errors, and take into account the
+                * current FIF_* filters.
+                */
+               filters = AR5K_RXERR_DECRYPT;
+               if (ah->fif_filter_flags & FIF_FCSFAIL)
+                       filters |= AR5K_RXERR_CRC;
+
+               if (rs->rs_status & ~filters)
                        return false;
        }
 
index afb23b3cc7be64d6b0ae8f79d46a3a9c9d07eab3..b65c38fdaa4b23723ac8f59add51e46acc7d16f1 100644 (file)
@@ -473,6 +473,8 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
        /* Set the cached hw filter flags, this will later actually
         * be set in HW */
        ah->filter_flags = rfilt;
+       /* Store current FIF filter flags */
+       ah->fif_filter_flags = *new_flags;
 
        mutex_unlock(&ah->lock);
 }
index 0e26f4a34fda329910ecc278de9fe7bea8fa6c57..1c4ce8e3eebee3cd587d938de99e342024f8d7f4 100644 (file)
@@ -3636,7 +3636,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
        struct net_device *ndev;
        struct ath6kl_vif *vif;
 
-       ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+       ndev = alloc_netdev(sizeof(*vif), name, NET_NAME_UNKNOWN, ether_setup);
        if (!ndev)
                return NULL;
 
index d5ef211f261c2c19e6e8deeef985dc2b83130794..8ee7097f0b259da5f0604d389f5003aa892053af 100644 (file)
@@ -1187,7 +1187,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
        default:
                WARN_ON(1);
                return -EINVAL;
-               break;
        }
 
        if (board_ext_address &&
index 8fcd586d1c3980c413c8499cb3aefd518f507f69..6b4020a5798477e3ab21b22ce2219aea3cf6553c 100644 (file)
@@ -5,7 +5,8 @@ ath9k-y +=      beacon.o \
                recv.o \
                xmit.o \
                link.o \
-               antenna.o
+               antenna.o \
+               channel.o
 
 ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
index 235053ba773765392d08427916bfb398ff52d053..80c6eacbda53349727fe4bfba72e32eb2abe28fd 100644 (file)
@@ -3535,7 +3535,8 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
 {
        int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
 
-       if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+       if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+           AR_SREV_9531(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
        else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
                REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
index ec1da0cc25f53b14412414f2d3aebef7eb1224cc..ddef9eedbac6f0d5e0b2ea2d0bfacda4f96e63c1 100644 (file)
@@ -314,10 +314,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
                               qca953x_1p0_mac_core);
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
                               qca953x_1p0_mac_postamble);
-               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
-                              qca953x_1p0_baseband_core);
-               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
-                              qca953x_1p0_baseband_postamble);
+               if (AR_SREV_9531_20(ah)) {
+                       INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                                      qca953x_2p0_baseband_core);
+                       INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                                      qca953x_2p0_baseband_postamble);
+               } else {
+                       INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                                      qca953x_1p0_baseband_core);
+                       INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                                      qca953x_1p0_baseband_postamble);
+               }
                INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
                               qca953x_1p0_radio_core);
                INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
index 8927fc34d84c2f009e192dc9a37cb140be941e54..542a8d51d3b033bd21521a6b34972107eb44ff04 100644 (file)
@@ -1552,13 +1552,15 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
                                      u8 *ini_reloaded)
 {
        unsigned int regWrites = 0;
-       u32 modesIndex;
+       u32 modesIndex, txgain_index;
 
        if (IS_CHAN_5GHZ(chan))
                modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
        else
                modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
+       txgain_index = AR_SREV_9531(ah) ? 1 : modesIndex;
+
        if (modesIndex == ah->modes_index) {
                *ini_reloaded = false;
                goto set_rfmode;
@@ -1573,7 +1575,7 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
                ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
                                   modesIndex);
 
-       REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+       REG_WRITE_ARRAY(&ah->iniModesTxGain, txgain_index, regWrites);
 
        if (AR_SREV_9462_20_OR_LATER(ah)) {
                /*
index 8e5c3b9786e3ac3fab8cf42e0d80bf84f268d4d2..812a9d787bf353236ebc906450583abce3c58a97 100644 (file)
@@ -219,7 +219,7 @@ static const u32 qca953x_1p0_baseband_core[][2] = {
        {0x00009d04, 0x40206c10},
        {0x00009d08, 0x009c4060},
        {0x00009d0c, 0x9883800a},
-       {0x00009d10, 0x01884061},
+       {0x00009d10, 0x018848c6},
        {0x00009d14, 0x00c0040b},
        {0x00009d18, 0x00000000},
        {0x00009e08, 0x0038230c},
@@ -715,4 +715,203 @@ static const u32 qca953x_1p1_modes_no_xpa_tx_gain_table[][2] = {
        {0x00016448, 0x6c927a70},
 };
 
+static const u32 qca953x_2p0_baseband_core[][2] = {
+       /* Addr      allmodes */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a9f6b},
+       {0x0000980c, 0x04900000},
+       {0x00009814, 0x0280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x6400a190},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x14000600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x32840bbe},
+       {0x000098bc, 0x00000002},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009c04, 0xff55ff55},
+       {0x00009c08, 0x0320ff55},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x9883800a},
+       {0x00009d10, 0x018848c6},
+       {0x00009d14, 0x00c0040b},
+       {0x00009d18, 0x00000000},
+       {0x00009e08, 0x0038230c},
+       {0x00009e24, 0x990bb515},
+       {0x00009e28, 0x0c6f0000},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x0d261820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009fc0, 0x813e4788},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x00009fd0, 0x02993b93},
+       {0x0000a20c, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a248, 0x00000140},
+       {0x0000a2a0, 0x00000007},
+       {0x0000a2c0, 0x00000007},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3a4, 0x000400ff},
+       {0x0000a3a8, 0x6a6a6a6a},
+       {0x0000a3ac, 0x6a6a6a6a},
+       {0x0000a3b0, 0x00c8641a},
+       {0x0000a3b4, 0x0000001a},
+       {0x0000a3b8, 0x0088642a},
+       {0x0000a3bc, 0x000001fa},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000000},
+       {0x0000a3f8, 0x0c9bd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce42108},
+       {0x0000a418, 0x2d001dce},
+       {0x0000a41c, 0x1ce73908},
+       {0x0000a420, 0x000001ce},
+       {0x0000a424, 0x1ce738e7},
+       {0x0000a428, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00100000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x05000080},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a458, 0x00000000},
+       {0x0000a644, 0xbfad9d74},
+       {0x0000a648, 0x0048060a},
+       {0x0000a64c, 0x00003c37},
+       {0x0000a670, 0x03020100},
+       {0x0000a674, 0x09080504},
+       {0x0000a678, 0x0d0c0b0a},
+       {0x0000a67c, 0x13121110},
+       {0x0000a680, 0x31301514},
+       {0x0000a684, 0x35343332},
+       {0x0000a688, 0x00000036},
+       {0x0000a690, 0x08000838},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000000},
+       {0x0000a8d0, 0x004b6a8e},
+       {0x0000a8d4, 0x00000820},
+       {0x0000a8dc, 0x00000000},
+       {0x0000a8f0, 0x00000000},
+       {0x0000a8f4, 0x00000000},
+       {0x0000b2d0, 0x00000080},
+       {0x0000b2d4, 0x00000000},
+       {0x0000b2ec, 0x00000000},
+       {0x0000b2f0, 0x00000000},
+       {0x0000b2f4, 0x00000000},
+       {0x0000b2f8, 0x00000000},
+       {0x0000b408, 0x0e79e5c0},
+       {0x0000b40c, 0x00820820},
+       {0x0000b420, 0x00000000},
+};
+
+static const u32 qca953x_2p0_baseband_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+       {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+       {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+       {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+       {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+       {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+       {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+       {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+       {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+       {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01010e0e, 0x01010e0e},
+       {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+       {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+       {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+       {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+       {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+       {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+       {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+       {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+       {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
 #endif /* INITVALS_953X_H */
index 2ca8f7e061742420fe00a9bddf61fb366feb93dc..11b5e4dd629491179809aa89ef23aa9e57f5c6c4 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/leds.h>
 #include <linux/completion.h>
+#include <linux/time.h>
 
 #include "common.h"
 #include "debug.h"
@@ -35,10 +36,7 @@ extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
 extern int led_blink;
 extern bool is_ath9k_unloaded;
-
-struct ath_config {
-       u16 txpowlimit;
-};
+extern int ath9k_use_chanctx;
 
 /*************************/
 /* Descriptor Management */
@@ -167,7 +165,6 @@ struct ath_txq {
        u32 axq_ampdu_depth;
        bool stopped;
        bool axq_tx_inprogress;
-       struct list_head axq_acq;
        struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
        u8 txq_headidx;
        u8 txq_tailidx;
@@ -280,8 +277,9 @@ struct ath_node {
 struct ath_tx_control {
        struct ath_txq *txq;
        struct ath_node *an;
-       u8 paprd;
        struct ieee80211_sta *sta;
+       u8 paprd;
+       bool force_channel;
 };
 
 
@@ -325,6 +323,116 @@ struct ath_rx {
        u32 ampdu_ref;
 };
 
+struct ath_chanctx {
+       struct cfg80211_chan_def chandef;
+       struct list_head vifs;
+       struct list_head acq[IEEE80211_NUM_ACS];
+       int hw_queue_base;
+
+       /* do not dereference, use for comparison only */
+       struct ieee80211_vif *primary_sta;
+
+       struct ath_beacon_config beacon;
+       struct ath9k_hw_cal_data caldata;
+       struct timespec tsf_ts;
+       u64 tsf_val;
+       u32 last_beacon;
+
+       u16 txpower;
+       bool offchannel;
+       bool stopped;
+       bool active;
+       bool assigned;
+       bool switch_after_beacon;
+};
+
+enum ath_chanctx_event {
+       ATH_CHANCTX_EVENT_BEACON_PREPARE,
+       ATH_CHANCTX_EVENT_BEACON_SENT,
+       ATH_CHANCTX_EVENT_TSF_TIMER,
+       ATH_CHANCTX_EVENT_BEACON_RECEIVED,
+       ATH_CHANCTX_EVENT_ASSOC,
+       ATH_CHANCTX_EVENT_SWITCH,
+       ATH_CHANCTX_EVENT_UNASSIGN,
+       ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL,
+};
+
+enum ath_chanctx_state {
+       ATH_CHANCTX_STATE_IDLE,
+       ATH_CHANCTX_STATE_WAIT_FOR_BEACON,
+       ATH_CHANCTX_STATE_WAIT_FOR_TIMER,
+       ATH_CHANCTX_STATE_SWITCH,
+       ATH_CHANCTX_STATE_FORCE_ACTIVE,
+};
+
+struct ath_chanctx_sched {
+       bool beacon_pending;
+       bool offchannel_pending;
+       enum ath_chanctx_state state;
+       u8 beacon_miss;
+
+       u32 next_tbtt;
+       u32 switch_start_time;
+       unsigned int offchannel_duration;
+       unsigned int channel_switch_time;
+
+       /* backup, in case the hardware timer fails */
+       struct timer_list timer;
+};
+
+enum ath_offchannel_state {
+       ATH_OFFCHANNEL_IDLE,
+       ATH_OFFCHANNEL_PROBE_SEND,
+       ATH_OFFCHANNEL_PROBE_WAIT,
+       ATH_OFFCHANNEL_SUSPEND,
+       ATH_OFFCHANNEL_ROC_START,
+       ATH_OFFCHANNEL_ROC_WAIT,
+       ATH_OFFCHANNEL_ROC_DONE,
+};
+
+struct ath_offchannel {
+       struct ath_chanctx chan;
+       struct timer_list timer;
+       struct cfg80211_scan_request *scan_req;
+       struct ieee80211_vif *scan_vif;
+       int scan_idx;
+       enum ath_offchannel_state state;
+       struct ieee80211_channel *roc_chan;
+       struct ieee80211_vif *roc_vif;
+       int roc_duration;
+       int duration;
+};
+#define ath_for_each_chanctx(_sc, _ctx)                             \
+       for (ctx = &sc->chanctx[0];                                 \
+            ctx <= &sc->chanctx[ARRAY_SIZE(sc->chanctx) - 1];      \
+            ctx++)
+
+void ath9k_fill_chanctx_ops(void);
+void ath9k_chanctx_force_active(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif);
+static inline struct ath_chanctx *
+ath_chanctx_get(struct ieee80211_chanctx_conf *ctx)
+{
+       struct ath_chanctx **ptr = (void *) ctx->drv_priv;
+       return *ptr;
+}
+void ath_chanctx_init(struct ath_softc *sc);
+void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
+                            struct cfg80211_chan_def *chandef);
+void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx,
+                       struct cfg80211_chan_def *chandef);
+void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx);
+void ath_offchannel_timer(unsigned long data);
+void ath_offchannel_channel_change(struct ath_softc *sc);
+void ath_chanctx_offchan_switch(struct ath_softc *sc,
+                               struct ieee80211_channel *chan);
+struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc,
+                                             bool active);
+void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
+                      enum ath_chanctx_event ev);
+void ath_chanctx_timer(unsigned long data);
+
+int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan);
 int ath_startrecv(struct ath_softc *sc);
 bool ath_stoprecv(struct ath_softc *sc);
 u32 ath_calcrxfilter(struct ath_softc *sc);
@@ -341,6 +449,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq);
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_schedule_all(struct ath_softc *sc);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *q);
@@ -370,32 +479,47 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 /********/
 
 struct ath_vif {
+       struct list_head list;
+
        struct ieee80211_vif *vif;
        struct ath_node mcast_node;
        int av_bslot;
-       bool primary_sta_vif;
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        struct ath_buf *av_bcbuf;
+       struct ath_chanctx *chanctx;
 
        /* P2P Client */
        struct ieee80211_noa_data noa;
+
+       /* P2P GO */
+       u8 noa_index;
+       u32 offchannel_start;
+       u32 offchannel_duration;
+
+       u32 periodic_noa_start;
+       u32 periodic_noa_duration;
 };
 
 struct ath9k_vif_iter_data {
        u8 hw_macaddr[ETH_ALEN]; /* address of the first vif */
        u8 mask[ETH_ALEN]; /* bssid mask */
        bool has_hw_macaddr;
+       u8 slottime;
+       bool beacons;
 
        int naps;      /* number of AP vifs */
        int nmeshes;   /* number of mesh vifs */
        int nstations; /* number of station vifs */
        int nwds;      /* number of WDS vifs */
        int nadhocs;   /* number of adhoc vifs */
+       struct ieee80211_vif *primary_sta;
 };
 
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif,
+void ath9k_calculate_iter_data(struct ath_softc *sc,
+                              struct ath_chanctx *ctx,
                               struct ath9k_vif_iter_data *iter_data);
+void ath9k_calculate_summary_state(struct ath_softc *sc,
+                                  struct ath_chanctx *ctx);
 
 /*******************/
 /* Beacon Handling */
@@ -458,6 +582,7 @@ void ath9k_csa_update(struct ath_softc *sc);
 #define ATH_PAPRD_TIMEOUT         100 /* msecs */
 #define ATH_PLL_WORK_INTERVAL     100
 
+void ath_chanctx_work(struct work_struct *work);
 void ath_tx_complete_poll_work(struct work_struct *work);
 void ath_reset_work(struct work_struct *work);
 bool ath_hw_check(struct ath_softc *sc);
@@ -473,6 +598,7 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
 void ath_ps_full_sleep(unsigned long data);
 void ath9k_p2p_ps_timer(void *priv);
 void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif);
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
 
 /**********/
 /* BTCOEX */
@@ -702,6 +828,8 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
 #define PS_BEACON_SYNC            BIT(4)
 #define PS_WAIT_FOR_ANI           BIT(5)
 
+#define ATH9K_NUM_CHANCTX  2 /* supports 2 operating channels */
+
 struct ath_softc {
        struct ieee80211_hw *hw;
        struct device *dev;
@@ -720,6 +848,7 @@ struct ath_softc {
        struct mutex mutex;
        struct work_struct paprd_work;
        struct work_struct hw_reset_work;
+       struct work_struct chanctx_work;
        struct completion paprd_complete;
        wait_queue_head_t tx_wait;
 
@@ -738,23 +867,27 @@ struct ath_softc {
        short nvifs;
        unsigned long ps_usecount;
 
-       struct ath_config config;
        struct ath_rx rx;
        struct ath_tx tx;
        struct ath_beacon beacon;
 
+       struct cfg80211_chan_def cur_chandef;
+       struct ath_chanctx chanctx[ATH9K_NUM_CHANCTX];
+       struct ath_chanctx *cur_chan;
+       struct ath_chanctx *next_chan;
+       spinlock_t chan_lock;
+       struct ath_offchannel offchannel;
+       struct ath_chanctx_sched sched;
+
 #ifdef CONFIG_MAC80211_LEDS
        bool led_registered;
        char led_name[32];
        struct led_classdev led_cdev;
 #endif
 
-       struct ath9k_hw_cal_data caldata;
-
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
 #endif
-       struct ath_beacon_config cur_beacon_conf;
        struct delayed_work tx_complete_work;
        struct delayed_work hw_pll_work;
        struct timer_list sleep_timer;
index e387f0b2954a0cf5500610b330f744a5b68cd69c..eaf8f058c15154941c1abfe0cc34585a5a217733 100644 (file)
@@ -80,7 +80,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
        u8 chainmask = ah->txchainmask;
        u8 rate = 0;
 
-       sband = &common->sbands[common->hw->conf.chandef.chan->band];
+       sband = &common->sbands[sc->cur_chandef.chan->band];
        rate = sband->bitrates[rateidx].hw_value;
        if (vif->bss_conf.use_short_preamble)
                rate |= sband->bitrates[rateidx].hw_value_short;
@@ -108,6 +108,55 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
        ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
 }
 
+static void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp,
+                                struct sk_buff *skb)
+{
+       static const u8 noa_ie_hdr[] = {
+               WLAN_EID_VENDOR_SPECIFIC,       /* type */
+               0,                              /* length */
+               0x50, 0x6f, 0x9a,               /* WFA OUI */
+               0x09,                           /* P2P subtype */
+               0x0c,                           /* Notice of Absence */
+               0x00,                           /* LSB of little-endian len */
+               0x00,                           /* MSB of little-endian len */
+       };
+
+       struct ieee80211_p2p_noa_attr *noa;
+       int noa_len, noa_desc, i = 0;
+       u8 *hdr;
+
+       if (!avp->offchannel_duration && !avp->periodic_noa_duration)
+               return;
+
+       noa_desc = !!avp->offchannel_duration + !!avp->periodic_noa_duration;
+       noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc;
+
+       hdr = skb_put(skb, sizeof(noa_ie_hdr));
+       memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr));
+       hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2;
+       hdr[7] = noa_len;
+
+       noa = (void *) skb_put(skb, noa_len);
+       memset(noa, 0, noa_len);
+
+       noa->index = avp->noa_index;
+       if (avp->periodic_noa_duration) {
+               u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
+
+               noa->desc[i].count = 255;
+               noa->desc[i].start_time = cpu_to_le32(avp->periodic_noa_start);
+               noa->desc[i].duration = cpu_to_le32(avp->periodic_noa_duration);
+               noa->desc[i].interval = cpu_to_le32(interval);
+               i++;
+       }
+
+       if (avp->offchannel_duration) {
+               noa->desc[i].count = 1;
+               noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start);
+               noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration);
+       }
+}
+
 static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
                                             struct ieee80211_vif *vif)
 {
@@ -155,6 +204,9 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }
 
+       if (vif->p2p)
+               ath9k_beacon_add_noa(sc, avp, skb);
+
        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
                                         skb->len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
@@ -249,7 +301,7 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
 static int ath9k_beacon_choose_slot(struct ath_softc *sc)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
        u16 intval;
        u32 tsftu;
        u64 tsf;
@@ -277,8 +329,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
 static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
        struct ath_vif *avp = (void *)vif->drv_priv;
+       struct ath_beacon_config *cur_conf = &avp->chanctx->beacon;
        u32 tsfadjust;
 
        if (avp->av_bslot == 0)
@@ -374,12 +426,19 @@ void ath9k_beacon_tasklet(unsigned long data)
        vif = sc->beacon.bslot[slot];
 
        /* EDMA devices check that in the tx completion function. */
-       if (!edma && ath9k_csa_is_finished(sc, vif))
-               return;
+       if (!edma) {
+               if (sc->sched.beacon_pending)
+                       ath_chanctx_event(sc, NULL,
+                                         ATH_CHANCTX_EVENT_BEACON_SENT);
+
+               if (ath9k_csa_is_finished(sc, vif))
+                       return;
+       }
 
        if (!vif || !vif->bss_conf.enable_beacon)
                return;
 
+       ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE);
        bf = ath9k_beacon_generate(sc->hw, vif);
 
        if (sc->beacon.bmisscnt != 0) {
@@ -500,7 +559,6 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
                                      struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_vif *avp = (void *)vif->drv_priv;
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
                if ((vif->type != NL80211_IFTYPE_AP) ||
@@ -514,7 +572,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
                if ((vif->type == NL80211_IFTYPE_STATION) &&
                    test_bit(ATH_OP_BEACONS, &common->op_flags) &&
-                   !avp->primary_sta_vif) {
+                   vif != sc->cur_chan->primary_sta) {
                        ath_dbg(common, CONFIG,
                                "Beacon already configured for a station interface\n");
                        return false;
@@ -525,10 +583,11 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
 }
 
 static void ath9k_cache_beacon_config(struct ath_softc *sc,
+                                     struct ath_chanctx *ctx,
                                      struct ieee80211_bss_conf *bss_conf)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &ctx->beacon;
 
        ath_dbg(common, BEACON,
                "Caching beacon data for BSS: %pM\n", bss_conf->bssid);
@@ -564,20 +623,29 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
                         u32 changed)
 {
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
         struct ath_hw *ah = sc->sc_ah;
         struct ath_common *common = ath9k_hw_common(ah);
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       struct ath_chanctx *ctx = avp->chanctx;
+       struct ath_beacon_config *cur_conf;
        unsigned long flags;
        bool skip_beacon = false;
 
+       if (!ctx)
+               return;
+
+       cur_conf = &avp->chanctx->beacon;
        if (vif->type == NL80211_IFTYPE_AP)
                ath9k_set_tsfadjust(sc, vif);
 
        if (!ath9k_allow_beacon_config(sc, vif))
                return;
 
-       if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
-               ath9k_cache_beacon_config(sc, bss_conf);
+       if (vif->type == NL80211_IFTYPE_STATION) {
+               ath9k_cache_beacon_config(sc, ctx, bss_conf);
+               if (ctx != sc->cur_chan)
+                       return;
+
                ath9k_set_beacon(sc);
                set_bit(ATH_OP_BEACONS, &common->op_flags);
                return;
@@ -593,10 +661,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
                        cur_conf->enable_beacon = false;
                } else if (bss_conf->enable_beacon) {
                        cur_conf->enable_beacon = true;
-                       ath9k_cache_beacon_config(sc, bss_conf);
+                       ath9k_cache_beacon_config(sc, ctx, bss_conf);
                }
        }
 
+       if (ctx != sc->cur_chan)
+               return;
+
        /*
         * Configure the HW beacon registers only when we have a valid
         * beacon interval.
@@ -631,7 +702,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 void ath9k_set_beacon(struct ath_softc *sc)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
 
        switch (sc->sc_ah->opmode) {
        case NL80211_IFTYPE_AP:
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
new file mode 100644 (file)
index 0000000..ba214eb
--- /dev/null
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/* Set/change channels.  If the channel is really being changed, it's done
+ * by resetting the chip.  To accomplish this we must first clean up any pending
+ * DMA, then restart stuff.
+ */
+static int ath_set_channel(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ieee80211_hw *hw = sc->hw;
+       struct ath9k_channel *hchan;
+       struct cfg80211_chan_def *chandef = &sc->cur_chan->chandef;
+       struct ieee80211_channel *chan = chandef->chan;
+       int pos = chan->hw_value;
+       int old_pos = -1;
+       int r;
+
+       if (test_bit(ATH_OP_INVALID, &common->op_flags))
+               return -EIO;
+
+       if (ah->curchan)
+               old_pos = ah->curchan - &ah->channels[0];
+
+       ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+               chan->center_freq, chandef->width);
+
+       /* update survey stats for the old channel before switching */
+       spin_lock_bh(&common->cc_lock);
+       ath_update_survey_stats(sc);
+       spin_unlock_bh(&common->cc_lock);
+
+       ath9k_cmn_get_channel(hw, ah, chandef);
+
+       /* If the operating channel changes, change the survey in-use flags
+        * along with it.
+        * Reset the survey data for the new channel, unless we're switching
+        * back to the operating channel from an off-channel operation.
+        */
+       if (!sc->cur_chan->offchannel && sc->cur_survey != &sc->survey[pos]) {
+               if (sc->cur_survey)
+                       sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+               sc->cur_survey = &sc->survey[pos];
+
+               memset(sc->cur_survey, 0, sizeof(struct survey_info));
+               sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+       } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+               memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+       }
+
+       hchan = &sc->sc_ah->channels[pos];
+       r = ath_reset_internal(sc, hchan);
+       if (r)
+               return r;
+
+       /* The most recent snapshot of channel->noisefloor for the old
+        * channel is only available after the hardware reset. Copy it to
+        * the survey stats now.
+        */
+       if (old_pos >= 0)
+               ath_update_survey_nf(sc, old_pos);
+
+       /* Enable radar pulse detection if on a DFS channel. Spectral
+        * scanning and radar detection can not be used concurrently.
+        */
+       if (hw->conf.radar_enabled) {
+               u32 rxfilter;
+
+               /* set HW specific DFS configuration */
+               ath9k_hw_set_radar_params(ah);
+               rxfilter = ath9k_hw_getrxfilter(ah);
+               rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+                               ATH9K_RX_FILTER_PHYERR;
+               ath9k_hw_setrxfilter(ah, rxfilter);
+               ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+                       chan->center_freq);
+       } else {
+               /* perform spectral scan if requested. */
+               if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
+                       sc->spectral_mode == SPECTRAL_CHANSCAN)
+                       ath9k_spectral_scan_trigger(hw);
+       }
+
+       return 0;
+}
+
+static bool
+ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
+                             bool powersave)
+{
+       struct ieee80211_vif *vif = avp->vif;
+       struct ieee80211_sta *sta = NULL;
+       struct ieee80211_hdr_3addr *nullfunc;
+       struct ath_tx_control txctl;
+       struct sk_buff *skb;
+       int band = sc->cur_chan->chandef.chan->band;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               if (!vif->bss_conf.assoc)
+                       return false;
+
+               skb = ieee80211_nullfunc_get(sc->hw, vif);
+               if (!skb)
+                       return false;
+
+               nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
+               if (powersave)
+                       nullfunc->frame_control |=
+                               cpu_to_le16(IEEE80211_FCTL_PM);
+
+               skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+               if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) {
+                       dev_kfree_skb_any(skb);
+                       return false;
+               }
+               break;
+       default:
+               return false;
+       }
+
+       memset(&txctl, 0, sizeof(txctl));
+       txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+       txctl.sta = sta;
+       txctl.force_channel = true;
+       if (ath_tx_start(sc->hw, skb, &txctl)) {
+               ieee80211_free_txskb(sc->hw, skb);
+               return false;
+       }
+
+       return true;
+}
+
+void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_vif *avp;
+       bool active = false;
+       u8 n_active = 0;
+
+       if (!ctx)
+               return;
+
+       list_for_each_entry(avp, &ctx->vifs, list) {
+               struct ieee80211_vif *vif = avp->vif;
+
+               switch (vif->type) {
+               case NL80211_IFTYPE_P2P_CLIENT:
+               case NL80211_IFTYPE_STATION:
+                       if (vif->bss_conf.assoc)
+                               active = true;
+                       break;
+               default:
+                       active = true;
+                       break;
+               }
+       }
+       ctx->active = active;
+
+       ath_for_each_chanctx(sc, ctx) {
+               if (!ctx->assigned || list_empty(&ctx->vifs))
+                       continue;
+               n_active++;
+       }
+
+       if (n_active <= 1) {
+               clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags);
+               return;
+       }
+       if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+               return;
+       ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL);
+}
+
+static bool
+ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave)
+{
+       struct ath_vif *avp;
+       bool sent = false;
+
+       rcu_read_lock();
+       list_for_each_entry(avp, &sc->cur_chan->vifs, list) {
+               if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave))
+                       sent = true;
+       }
+       rcu_read_unlock();
+
+       return sent;
+}
+
+static bool ath_chanctx_defer_switch(struct ath_softc *sc)
+{
+       if (sc->cur_chan == &sc->offchannel.chan)
+               return false;
+
+       switch (sc->sched.state) {
+       case ATH_CHANCTX_STATE_SWITCH:
+               return false;
+       case ATH_CHANCTX_STATE_IDLE:
+               if (!sc->cur_chan->switch_after_beacon)
+                       return false;
+
+               sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+               break;
+       default:
+               break;
+       }
+
+       return true;
+}
+
+static void ath_chanctx_set_next(struct ath_softc *sc, bool force)
+{
+       struct timespec ts;
+       bool measure_time = false;
+       bool send_ps = false;
+
+       spin_lock_bh(&sc->chan_lock);
+       if (!sc->next_chan) {
+               spin_unlock_bh(&sc->chan_lock);
+               return;
+       }
+
+       if (!force && ath_chanctx_defer_switch(sc)) {
+               spin_unlock_bh(&sc->chan_lock);
+               return;
+       }
+
+       if (sc->cur_chan != sc->next_chan) {
+               sc->cur_chan->stopped = true;
+               spin_unlock_bh(&sc->chan_lock);
+
+               if (sc->next_chan == &sc->offchannel.chan) {
+                       getrawmonotonic(&ts);
+                       measure_time = true;
+               }
+               __ath9k_flush(sc->hw, ~0, true);
+
+               if (ath_chanctx_send_ps_frame(sc, true))
+                       __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false);
+
+               send_ps = true;
+               spin_lock_bh(&sc->chan_lock);
+
+               if (sc->cur_chan != &sc->offchannel.chan) {
+                       getrawmonotonic(&sc->cur_chan->tsf_ts);
+                       sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
+               }
+       }
+       sc->cur_chan = sc->next_chan;
+       sc->cur_chan->stopped = false;
+       sc->next_chan = NULL;
+       sc->sched.offchannel_duration = 0;
+       if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE)
+               sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+
+       spin_unlock_bh(&sc->chan_lock);
+
+       if (sc->sc_ah->chip_fullsleep ||
+           memcmp(&sc->cur_chandef, &sc->cur_chan->chandef,
+                  sizeof(sc->cur_chandef))) {
+               ath_set_channel(sc);
+               if (measure_time)
+                       sc->sched.channel_switch_time =
+                               ath9k_hw_get_tsf_offset(&ts, NULL);
+       }
+       if (send_ps)
+               ath_chanctx_send_ps_frame(sc, false);
+
+       ath_offchannel_channel_change(sc);
+       ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH);
+}
+
+void ath_chanctx_work(struct work_struct *work)
+{
+       struct ath_softc *sc = container_of(work, struct ath_softc,
+                                           chanctx_work);
+       mutex_lock(&sc->mutex);
+       ath_chanctx_set_next(sc, false);
+       mutex_unlock(&sc->mutex);
+}
+
+void ath_chanctx_init(struct ath_softc *sc)
+{
+       struct ath_chanctx *ctx;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *chan;
+       int i, j;
+
+       sband = &common->sbands[IEEE80211_BAND_2GHZ];
+       if (!sband->n_channels)
+               sband = &common->sbands[IEEE80211_BAND_5GHZ];
+
+       chan = &sband->channels[0];
+       for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
+               ctx = &sc->chanctx[i];
+               cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
+               INIT_LIST_HEAD(&ctx->vifs);
+               ctx->txpower = ATH_TXPOWER_MAX;
+               for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
+                       INIT_LIST_HEAD(&ctx->acq[j]);
+       }
+       ctx = &sc->offchannel.chan;
+       cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
+       INIT_LIST_HEAD(&ctx->vifs);
+       ctx->txpower = ATH_TXPOWER_MAX;
+       for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
+               INIT_LIST_HEAD(&ctx->acq[j]);
+       sc->offchannel.chan.offchannel = true;
+
+}
+
+void ath9k_chanctx_force_active(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_vif *avp = (struct ath_vif *) vif->drv_priv;
+       bool changed = false;
+
+       if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags))
+               return;
+
+       if (!avp->chanctx)
+               return;
+
+       mutex_lock(&sc->mutex);
+
+       spin_lock_bh(&sc->chan_lock);
+       if (sc->next_chan || (sc->cur_chan != avp->chanctx)) {
+               sc->next_chan = avp->chanctx;
+               changed = true;
+       }
+       sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE;
+       spin_unlock_bh(&sc->chan_lock);
+
+       if (changed)
+               ath_chanctx_set_next(sc, true);
+
+       mutex_unlock(&sc->mutex);
+}
+
+void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx,
+                       struct cfg80211_chan_def *chandef)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+       spin_lock_bh(&sc->chan_lock);
+
+       if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) &&
+           (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) {
+               sc->sched.offchannel_pending = true;
+               spin_unlock_bh(&sc->chan_lock);
+               return;
+       }
+
+       sc->next_chan = ctx;
+       if (chandef)
+               ctx->chandef = *chandef;
+
+       if (sc->next_chan == &sc->offchannel.chan) {
+               sc->sched.offchannel_duration =
+                       TU_TO_USEC(sc->offchannel.duration) +
+                       sc->sched.channel_switch_time;
+       }
+       spin_unlock_bh(&sc->chan_lock);
+       ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+}
+
+void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx,
+                            struct cfg80211_chan_def *chandef)
+{
+       bool cur_chan;
+
+       spin_lock_bh(&sc->chan_lock);
+       if (chandef)
+               memcpy(&ctx->chandef, chandef, sizeof(*chandef));
+       cur_chan = sc->cur_chan == ctx;
+       spin_unlock_bh(&sc->chan_lock);
+
+       if (!cur_chan)
+               return;
+
+       ath_set_channel(sc);
+}
+
+struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, bool active)
+{
+       struct ath_chanctx *ctx;
+
+       ath_for_each_chanctx(sc, ctx) {
+               if (!ctx->assigned || list_empty(&ctx->vifs))
+                       continue;
+               if (active && !ctx->active)
+                       continue;
+
+               if (ctx->switch_after_beacon)
+                       return ctx;
+       }
+
+       return &sc->chanctx[0];
+}
+
+void ath_chanctx_offchan_switch(struct ath_softc *sc,
+                               struct ieee80211_channel *chan)
+{
+       struct cfg80211_chan_def chandef;
+
+       cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+
+       ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef);
+}
+
+static struct ath_chanctx *
+ath_chanctx_get_next(struct ath_softc *sc, struct ath_chanctx *ctx)
+{
+       int idx = ctx - &sc->chanctx[0];
+
+       return &sc->chanctx[!idx];
+}
+
+static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
+{
+       struct ath_chanctx *prev, *cur;
+       struct timespec ts;
+       u32 cur_tsf, prev_tsf, beacon_int;
+       s32 offset;
+
+       beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
+
+       cur = sc->cur_chan;
+       prev = ath_chanctx_get_next(sc, cur);
+
+       getrawmonotonic(&ts);
+       cur_tsf = (u32) cur->tsf_val +
+                 ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
+
+       prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
+       prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
+
+       /* Adjust the TSF time of the AP chanctx to keep its beacons
+        * at half beacon interval offset relative to the STA chanctx.
+        */
+       offset = cur_tsf - prev_tsf;
+
+       /* Ignore stale data or spurious timestamps */
+       if (offset < 0 || offset > 3 * beacon_int)
+               return;
+
+       offset = beacon_int / 2 - (offset % beacon_int);
+       prev->tsf_val += offset;
+}
+
+void ath_chanctx_timer(unsigned long data)
+{
+       struct ath_softc *sc = (struct ath_softc *) data;
+
+       ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
+}
+
+/* Configure the TSF based hardware timer for a channel switch.
+ * Also set up backup software timer, in case the gen timer fails.
+ * This could be caused by a hardware reset.
+ */
+static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time)
+{
+       struct ath_hw *ah = sc->sc_ah;
+
+       ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000);
+       tsf_time -= ath9k_hw_gettsf32(ah);
+       tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1;
+       mod_timer(&sc->sched.timer, tsf_time);
+}
+
+void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
+                      enum ath_chanctx_event ev)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath_beacon_config *cur_conf;
+       struct ath_vif *avp = NULL;
+       struct ath_chanctx *ctx;
+       u32 tsf_time;
+       u32 beacon_int;
+       bool noa_changed = false;
+
+       if (vif)
+               avp = (struct ath_vif *) vif->drv_priv;
+
+       spin_lock_bh(&sc->chan_lock);
+
+       switch (ev) {
+       case ATH_CHANCTX_EVENT_BEACON_PREPARE:
+               if (avp->offchannel_duration)
+                       avp->offchannel_duration = 0;
+
+               if (avp->chanctx != sc->cur_chan)
+                       break;
+
+               if (sc->sched.offchannel_pending) {
+                       sc->sched.offchannel_pending = false;
+                       sc->next_chan = &sc->offchannel.chan;
+                       sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+               }
+
+               ctx = ath_chanctx_get_next(sc, sc->cur_chan);
+               if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) {
+                       sc->next_chan = ctx;
+                       sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+               }
+
+               /* if the timer missed its window, use the next interval */
+               if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
+                       sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON;
+
+               if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
+                       break;
+
+               sc->sched.beacon_pending = true;
+               sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER);
+
+               cur_conf = &sc->cur_chan->beacon;
+               beacon_int = TU_TO_USEC(cur_conf->beacon_interval);
+
+               /* defer channel switch by a quarter beacon interval */
+               tsf_time = sc->sched.next_tbtt + beacon_int / 4;
+               sc->sched.switch_start_time = tsf_time;
+               sc->cur_chan->last_beacon = sc->sched.next_tbtt;
+
+               /* Prevent wrap-around issues */
+               if (avp->periodic_noa_duration &&
+                   tsf_time - avp->periodic_noa_start > BIT(30))
+                       avp->periodic_noa_duration = 0;
+
+               if (ctx->active && !avp->periodic_noa_duration) {
+                       avp->periodic_noa_start = tsf_time;
+                       avp->periodic_noa_duration =
+                               TU_TO_USEC(cur_conf->beacon_interval) / 2 -
+                               sc->sched.channel_switch_time;
+                       noa_changed = true;
+               } else if (!ctx->active && avp->periodic_noa_duration) {
+                       avp->periodic_noa_duration = 0;
+                       noa_changed = true;
+               }
+
+               /* If at least two consecutive beacons were missed on the STA
+                * chanctx, stay on the STA channel for one extra beacon period,
+                * to resync the timer properly.
+                */
+               if (ctx->active && sc->sched.beacon_miss >= 2)
+                       sc->sched.offchannel_duration = 3 * beacon_int / 2;
+
+               if (sc->sched.offchannel_duration) {
+                       noa_changed = true;
+                       avp->offchannel_start = tsf_time;
+                       avp->offchannel_duration =
+                               sc->sched.offchannel_duration;
+               }
+
+               if (noa_changed)
+                       avp->noa_index++;
+               break;
+       case ATH_CHANCTX_EVENT_BEACON_SENT:
+               if (!sc->sched.beacon_pending)
+                       break;
+
+               sc->sched.beacon_pending = false;
+               if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON)
+                       break;
+
+               sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
+               ath_chanctx_setup_timer(sc, sc->sched.switch_start_time);
+               break;
+       case ATH_CHANCTX_EVENT_TSF_TIMER:
+               if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_TIMER)
+                       break;
+
+               if (!sc->cur_chan->switch_after_beacon &&
+                   sc->sched.beacon_pending)
+                       sc->sched.beacon_miss++;
+
+               sc->sched.state = ATH_CHANCTX_STATE_SWITCH;
+               ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+               break;
+       case ATH_CHANCTX_EVENT_BEACON_RECEIVED:
+               if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
+                   sc->cur_chan == &sc->offchannel.chan)
+                       break;
+
+               ath_chanctx_adjust_tbtt_delta(sc);
+               sc->sched.beacon_pending = false;
+               sc->sched.beacon_miss = 0;
+
+               /* TSF time might have been updated by the incoming beacon,
+                * need to update the channel switch timer to reflect the change.
+                */
+               tsf_time = sc->sched.switch_start_time;
+               tsf_time -= (u32) sc->cur_chan->tsf_val +
+                       ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+               tsf_time += ath9k_hw_gettsf32(ah);
+
+
+               ath_chanctx_setup_timer(sc, tsf_time);
+               break;
+       case ATH_CHANCTX_EVENT_ASSOC:
+               if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+                   avp->chanctx != sc->cur_chan)
+                       break;
+
+               sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+               /* fall through */
+       case ATH_CHANCTX_EVENT_SWITCH:
+               if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
+                   sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
+                   sc->cur_chan->switch_after_beacon ||
+                   sc->cur_chan == &sc->offchannel.chan)
+                       break;
+
+               /* If this is a station chanctx, stay active for a half
+                * beacon period (minus channel switch time)
+                */
+               sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
+               cur_conf = &sc->cur_chan->beacon;
+
+               sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER;
+
+               tsf_time = TU_TO_USEC(cur_conf->beacon_interval) / 2;
+               if (sc->sched.beacon_miss >= 2) {
+                       sc->sched.beacon_miss = 0;
+                       tsf_time *= 3;
+               }
+
+               tsf_time -= sc->sched.channel_switch_time;
+               tsf_time += ath9k_hw_gettsf32(sc->sc_ah);
+               sc->sched.switch_start_time = tsf_time;
+
+               ath_chanctx_setup_timer(sc, tsf_time);
+               sc->sched.beacon_pending = true;
+               break;
+       case ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL:
+               if (sc->cur_chan == &sc->offchannel.chan ||
+                   sc->cur_chan->switch_after_beacon)
+                       break;
+
+               sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan);
+               ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+               break;
+       case ATH_CHANCTX_EVENT_UNASSIGN:
+               if (sc->cur_chan->assigned) {
+                       if (sc->next_chan && !sc->next_chan->assigned &&
+                           sc->next_chan != &sc->offchannel.chan)
+                               sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+                       break;
+               }
+
+               ctx = ath_chanctx_get_next(sc, sc->cur_chan);
+               sc->sched.state = ATH_CHANCTX_STATE_IDLE;
+               if (!ctx->assigned)
+                       break;
+
+               sc->next_chan = ctx;
+               ieee80211_queue_work(sc->hw, &sc->chanctx_work);
+               break;
+       }
+
+       spin_unlock_bh(&sc->chan_lock);
+}
index 775d1d20ce0b384bae1897879b1e4d17f6b4fbd4..733be5178481e6967141e952a23f348705bae2e4 100644 (file)
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
                                 struct ath9k_beacon_state *bs)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       int dtim_intval;
+       int dtim_intval, sleepduration;
        u64 tsf;
 
        /* No need to configure beacon if we are not associated */
@@ -75,6 +75,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         * last beacon we received (which may be none).
         */
        dtim_intval = conf->intval * conf->dtim_period;
+       sleepduration = ah->hw->conf.listen_interval * conf->intval;
 
        /*
         * Pull nexttbtt forward to reflect the current
@@ -112,7 +113,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
         */
 
        bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                conf->intval));
+                                                 sleepduration));
        if (bs->bs_sleepduration > bs->bs_dtimperiod)
                bs->bs_sleepduration = bs->bs_dtimperiod;
 
index 6cc42be48d4e60e68f3e2603ebfc465f89159a0d..ce073e995dfe3c50bfe128e5f3eadb1d5c9f5663 100644 (file)
@@ -750,13 +750,13 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
 {
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ieee80211_hw *hw = sc->hw;
        struct ath9k_vif_iter_data iter_data;
+       struct ath_chanctx *ctx;
        char buf[512];
        unsigned int len = 0;
        ssize_t retval = 0;
        unsigned int reg;
-       u32 rxfilter;
+       u32 rxfilter, i;
 
        len += scnprintf(buf + len, sizeof(buf) - len,
                         "BSSID: %pM\n", common->curbssid);
@@ -826,14 +826,20 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
 
        len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
-       ath9k_calculate_iter_data(hw, NULL, &iter_data);
-
-       len += scnprintf(buf + len, sizeof(buf) - len,
-                        "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
-                        " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
-                        iter_data.naps, iter_data.nstations, iter_data.nmeshes,
-                        iter_data.nwds, iter_data.nadhocs,
-                        sc->nvifs, sc->nbcnvifs);
+       i = 0;
+       ath_for_each_chanctx(sc, ctx) {
+               if (!ctx->assigned || list_empty(&ctx->vifs))
+                       continue;
+               ath9k_calculate_iter_data(sc, ctx, &iter_data);
+
+               len += scnprintf(buf + len, sizeof(buf) - len,
+                       "VIF-COUNTS: CTX %i AP: %i STA: %i MESH: %i WDS: %i",
+                       i++, iter_data.naps, iter_data.nstations,
+                       iter_data.nmeshes, iter_data.nwds);
+               len += scnprintf(buf + len, sizeof(buf) - len,
+                       " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+                       iter_data.nadhocs, sc->nvifs, sc->nbcnvifs);
+       }
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -1080,7 +1086,7 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
 {
        struct ath_softc *sc = file->private_data;
        struct ath_hw *ah = sc->sc_ah;
-       struct ath9k_nfcal_hist *h = sc->caldata.nfCalHist;
+       struct ath9k_nfcal_hist *h = sc->cur_chan->caldata.nfCalHist;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &common->hw->conf;
        u32 len = 0, size = 1500;
index 2a8ed8375ec0584771f4f7dcc833d17131ec34b2..fd0158fdf144df989c501316cbbe4692899dc693 100644 (file)
@@ -791,7 +791,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
                                refdiv = 5;
                        } else {
                                pll2_divint = 0x11;
-                               pll2_divfrac = 0x26666;
+                               pll2_divfrac =
+                                       AR_SREV_9531(ah) ? 0x26665 : 0x26666;
                                refdiv = 1;
                        }
                }
@@ -1730,6 +1731,23 @@ fail:
        return -EINVAL;
 }
 
+u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur)
+{
+       struct timespec ts;
+       s64 usec;
+
+       if (!cur) {
+               getrawmonotonic(&ts);
+               cur = &ts;
+       }
+
+       usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000;
+       usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000;
+
+       return (u32) usec;
+}
+EXPORT_SYMBOL(ath9k_hw_get_tsf_offset);
+
 int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                   struct ath9k_hw_cal_data *caldata, bool fastcc)
 {
@@ -1739,7 +1757,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        u32 saveDefAntenna;
        u32 macStaId1;
        u64 tsf = 0;
-       s64 usec = 0;
        int r;
        bool start_mci_reset = false;
        bool save_fullsleep = ah->chip_fullsleep;
@@ -1785,7 +1802,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        /* Save TSF before chip reset, a cold reset clears it */
        tsf = ath9k_hw_gettsf64(ah);
        getrawmonotonic(&ts);
-       usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
 
        saveLedState = REG_READ(ah, AR_CFG_LED) &
                (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
@@ -1818,9 +1834,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        }
 
        /* Restore TSF */
-       getrawmonotonic(&ts);
-       usec = ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000 - usec;
-       ath9k_hw_settsf64(ah, tsf + usec);
+       ath9k_hw_settsf64(ah, tsf + ath9k_hw_get_tsf_offset(&ts, NULL));
 
        if (AR_SREV_9280_20_OR_LATER(ah))
                REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
index 0acd4b5a48929f4443fde73706f6ee6bb8ce7a0b..51b4ebe04c04faaf612c744b2bbad93001c46622 100644 (file)
@@ -1000,6 +1000,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
+u32 ath9k_hw_get_tsf_offset(struct timespec *last, struct timespec *cur);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
index 0246b990fe87ade49ffa26ca417a8ff999179ebc..39419ea845cc0640ea30927e600e5c2f769cf075 100644 (file)
@@ -61,7 +61,7 @@ static int ath9k_ps_enable;
 module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
 MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
 
-static int ath9k_use_chanctx;
+int ath9k_use_chanctx;
 module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444);
 MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency");
 
@@ -169,9 +169,9 @@ static void ath9k_reg_notifier(struct wiphy *wiphy,
 
        /* Set tx power */
        if (ah->curchan) {
-               sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+               sc->cur_chan->txpower = 2 * ah->curchan->chan->max_power;
                ath9k_ps_wakeup(sc);
-               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+               ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
                /* synchronize DFS detector if regulatory domain changed */
                if (sc->dfs_detector != NULL)
@@ -335,7 +335,6 @@ static void ath9k_init_misc(struct ath_softc *sc)
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
 
        common->last_rssi = ATH_RSSI_DUMMY_MARKER;
-       sc->config.txpowlimit = ATH_TXPOWER_MAX;
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;
 
@@ -511,6 +510,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
        sc->tx99_power = MAX_RATE_POWER + 1;
        init_waitqueue_head(&sc->tx_wait);
+       sc->cur_chan = &sc->chanctx[0];
+       if (!ath9k_use_chanctx)
+               sc->cur_chan->hw_queue_base = 0;
 
        if (!pdata || pdata->use_eeprom) {
                ah->ah_flags |= AH_USE_EEPROM;
@@ -556,6 +558,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        spin_lock_init(&common->cc_lock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
+       spin_lock_init(&sc->chan_lock);
        mutex_init(&sc->mutex);
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
@@ -564,7 +567,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc);
        INIT_WORK(&sc->hw_reset_work, ath_reset_work);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+       INIT_WORK(&sc->chanctx_work, ath_chanctx_work);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+       setup_timer(&sc->offchannel.timer, ath_offchannel_timer,
+                   (unsigned long)sc);
+       setup_timer(&sc->sched.timer, ath_chanctx_timer, (unsigned long)sc);
 
        /*
         * Cache line size is used to size and align various
@@ -599,6 +606,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
        ath_fill_led_pin(sc);
+       ath_chanctx_init(sc);
 
        if (common->bus_ops->aspm_init)
                common->bus_ops->aspm_init(common);
@@ -664,6 +672,12 @@ static const struct ieee80211_iface_limit wds_limits[] = {
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_WDS) },
 };
 
+static const struct ieee80211_iface_limit if_limits_multi[] = {
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_STATION) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
 static const struct ieee80211_iface_limit if_dfs_limits[] = {
        { .max = 1,     .types = BIT(NL80211_IFTYPE_AP) |
 #ifdef CONFIG_MAC80211_MESH
@@ -672,6 +686,16 @@ static const struct ieee80211_iface_limit if_dfs_limits[] = {
                                 BIT(NL80211_IFTYPE_ADHOC) },
 };
 
+static const struct ieee80211_iface_combination if_comb_multi[] = {
+       {
+               .limits = if_limits_multi,
+               .n_limits = ARRAY_SIZE(if_limits_multi),
+               .max_interfaces = 2,
+               .num_different_channels = 2,
+               .beacon_int_infra_match = true,
+       },
+};
+
 static const struct ieee80211_iface_combination if_comb[] = {
        {
                .limits = if_limits,
@@ -712,6 +736,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                IEEE80211_HW_SUPPORTS_RC_TABLE |
+               IEEE80211_HW_QUEUE_CONTROL |
                IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
 
        if (ath9k_ps_enable)
@@ -739,12 +764,21 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                        BIT(NL80211_IFTYPE_STATION) |
                        BIT(NL80211_IFTYPE_ADHOC) |
                        BIT(NL80211_IFTYPE_MESH_POINT);
-               hw->wiphy->iface_combinations = if_comb;
                if (!ath9k_use_chanctx) {
+                       hw->wiphy->iface_combinations = if_comb;
                        hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
                        hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS);
-               } else
-                       hw->wiphy->n_iface_combinations = 1;
+               } else {
+                       hw->wiphy->iface_combinations = if_comb_multi;
+                       hw->wiphy->n_iface_combinations =
+                               ARRAY_SIZE(if_comb_multi);
+                       hw->wiphy->max_scan_ssids = 255;
+                       hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+                       hw->wiphy->max_remain_on_channel_duration = 10000;
+                       hw->chanctx_data_size = sizeof(void *);
+                       hw->extra_beacon_tailroom =
+                               sizeof(struct ieee80211_p2p_noa_attr) + 9;
+               }
        }
 
        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -756,9 +790,14 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
-       hw->queues = 4;
+       /* allow 4 queues per channel context +
+        * 1 cab queue + 1 offchannel tx queue
+        */
+       hw->queues = 10;
+       /* last queue for offchannel */
+       hw->offchannel_tx_hw_queue = hw->queues - 1;
        hw->max_rates = 4;
-       hw->max_listen_interval = 1;
+       hw->max_listen_interval = 10;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);
index 72a715fe8f24e2699323e0b434cc6a4a1f894dd5..2343f56e64987730bcb3ad1cb5542c41dcd94310 100644 (file)
@@ -178,7 +178,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
        txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
 
        memset(tx_info, 0, sizeof(*tx_info));
-       tx_info->band = hw->conf.chandef.chan->band;
+       tx_info->band = sc->cur_chandef.chan->band;
        tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
        tx_info->control.rates[0].idx = 0;
        tx_info->control.rates[0].count = 1;
@@ -416,7 +416,7 @@ void ath_start_ani(struct ath_softc *sc)
 
        if (common->disable_ani ||
            !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
-           (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+           sc->cur_chan->offchannel)
                return;
 
        common->ani.longcal_timer = timestamp;
@@ -440,7 +440,7 @@ void ath_check_ani(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
 
        /*
         * Check for the various conditions in which ANI has to
index 62ac95d6bb9d6e60b3bbc47e4f7715eecbe56bb9..e6ac8d2e610ca421f60dbfa6eb68dd46f8061c9d 100644 (file)
@@ -19,9 +19,6 @@
 #include "ath9k.h"
 #include "btcoex.h"
 
-static void ath9k_set_assoc_state(struct ath_softc *sc,
-                                 struct ieee80211_vif *vif);
-
 u8 ath9k_parse_mpdudensity(u8 mpdudensity)
 {
        /*
@@ -63,9 +60,16 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
 
        spin_lock_bh(&txq->axq_lock);
 
-       if (txq->axq_depth || !list_empty(&txq->axq_acq))
+       if (txq->axq_depth)
                pending = true;
 
+       if (txq->mac80211_qnum >= 0) {
+               struct list_head *list;
+
+               list = &sc->cur_chan->acq[txq->mac80211_qnum];
+               if (!list_empty(list))
+                       pending = true;
+       }
        spin_unlock_bh(&txq->axq_lock);
        return pending;
 }
@@ -227,13 +231,22 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        }
 
        ath9k_cmn_update_txpow(ah, sc->curtxpow,
-                              sc->config.txpowlimit, &sc->curtxpow);
+                              sc->cur_chan->txpower, &sc->curtxpow);
 
        clear_bit(ATH_OP_HW_RESET, &common->op_flags);
-       ath9k_hw_set_interrupts(ah);
-       ath9k_hw_enable_interrupts(ah);
+       ath9k_calculate_summary_state(sc, sc->cur_chan);
+
+       if (!sc->cur_chan->offchannel && start) {
+               /* restore per chanctx TSF timer */
+               if (sc->cur_chan->tsf_val) {
+                       u32 offset;
+
+                       offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
+                                                        NULL);
+                       ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
+               }
+
 
-       if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
                if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
                        goto work;
 
@@ -247,26 +260,35 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                }
        work:
                ath_restart_work(sc);
+               ath_txq_schedule_all(sc);
+       }
 
-               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-                       if (!ATH_TXQ_SETUP(sc, i))
-                               continue;
+       sc->gtt_cnt = 0;
 
-                       spin_lock_bh(&sc->tx.txq[i].axq_lock);
-                       ath_txq_schedule(sc, &sc->tx.txq[i]);
-                       spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+       ath9k_hw_set_interrupts(ah);
+       ath9k_hw_enable_interrupts(ah);
+
+       if (!ath9k_use_chanctx)
+               ieee80211_wake_queues(sc->hw);
+       else {
+               if (sc->cur_chan == &sc->offchannel.chan)
+                       ieee80211_wake_queue(sc->hw,
+                                       sc->hw->offchannel_tx_hw_queue);
+               else {
+                       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+                               ieee80211_wake_queue(sc->hw,
+                                       sc->cur_chan->hw_queue_base + i);
                }
+               if (ah->opmode == NL80211_IFTYPE_AP)
+                       ieee80211_wake_queue(sc->hw, sc->hw->queues - 2);
        }
 
-       sc->gtt_cnt = 0;
-       ieee80211_wake_queues(sc->hw);
-
        ath9k_p2p_ps_timer(sc);
 
        return true;
 }
 
-static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
+int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -279,9 +301,9 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
        tasklet_disable(&sc->intr_tq);
        spin_lock_bh(&sc->sc_pcu_lock);
 
-       if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
+       if (!sc->cur_chan->offchannel) {
                fastcc = false;
-               caldata = &sc->caldata;
+               caldata = &sc->cur_chan->caldata;
        }
 
        if (!hchan) {
@@ -292,6 +314,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
        if (!ath_prepare_reset(sc))
                fastcc = false;
 
+       spin_lock_bh(&sc->chan_lock);
+       sc->cur_chandef = sc->cur_chan->chandef;
+       spin_unlock_bh(&sc->chan_lock);
+
        ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
                hchan->channel, IS_CHAN_HT40(hchan), fastcc);
 
@@ -307,7 +333,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
        }
 
        if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
-           (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+           sc->cur_chan->offchannel)
                ath9k_mci_set_txpower(sc, true, false);
 
        if (!ath_complete_reset(sc, true))
@@ -320,98 +346,6 @@ out:
        return r;
 }
 
-
-/*
- * Set/change channels.  If the channel is really being changed, it's done
- * by reseting the chip.  To accomplish this we must first cleanup any pending
- * DMA, then restart stuff.
-*/
-static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_hw *hw = sc->hw;
-       struct ath9k_channel *hchan;
-       struct ieee80211_channel *chan = chandef->chan;
-       bool offchannel;
-       int pos = chan->hw_value;
-       int old_pos = -1;
-       int r;
-
-       if (test_bit(ATH_OP_INVALID, &common->op_flags))
-               return -EIO;
-
-       offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
-
-       if (ah->curchan)
-               old_pos = ah->curchan - &ah->channels[0];
-
-       ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
-               chan->center_freq, chandef->width);
-
-       /* update survey stats for the old channel before switching */
-       spin_lock_bh(&common->cc_lock);
-       ath_update_survey_stats(sc);
-       spin_unlock_bh(&common->cc_lock);
-
-       ath9k_cmn_get_channel(hw, ah, chandef);
-
-       /*
-        * If the operating channel changes, change the survey in-use flags
-        * along with it.
-        * Reset the survey data for the new channel, unless we're switching
-        * back to the operating channel from an off-channel operation.
-        */
-       if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
-               if (sc->cur_survey)
-                       sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
-               sc->cur_survey = &sc->survey[pos];
-
-               memset(sc->cur_survey, 0, sizeof(struct survey_info));
-               sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
-       } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
-               memset(&sc->survey[pos], 0, sizeof(struct survey_info));
-       }
-
-       hchan = &sc->sc_ah->channels[pos];
-       r = ath_reset_internal(sc, hchan);
-       if (r)
-               return r;
-
-       /*
-        * The most recent snapshot of channel->noisefloor for the old
-        * channel is only available after the hardware reset. Copy it to
-        * the survey stats now.
-        */
-       if (old_pos >= 0)
-               ath_update_survey_nf(sc, old_pos);
-
-       /*
-        * Enable radar pulse detection if on a DFS channel. Spectral
-        * scanning and radar detection can not be used concurrently.
-        */
-       if (hw->conf.radar_enabled) {
-               u32 rxfilter;
-
-               /* set HW specific DFS configuration */
-               ath9k_hw_set_radar_params(ah);
-               rxfilter = ath9k_hw_getrxfilter(ah);
-               rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
-                               ATH9K_RX_FILTER_PHYERR;
-               ath9k_hw_setrxfilter(ah, rxfilter);
-               ath_dbg(common, DFS, "DFS enabled at freq %d\n",
-                       chan->center_freq);
-       } else {
-               /* perform spectral scan if requested. */
-               if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
-                       sc->spectral_mode == SPECTRAL_CHANSCAN)
-                       ath9k_spectral_scan_trigger(hw);
-       }
-
-       return 0;
-}
-
 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
                            struct ieee80211_vif *vif)
 {
@@ -712,7 +646,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+       struct ieee80211_channel *curchan = sc->cur_chan->chandef.chan;
+       struct ath_chanctx *ctx = sc->cur_chan;
        struct ath9k_channel *init_channel;
        int r;
 
@@ -723,7 +658,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
        ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
-       init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
+       init_channel = ath9k_cmn_get_channel(hw, ah, &ctx->chandef);
+       sc->cur_chandef = hw->conf.chandef;
 
        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, false);
@@ -886,6 +822,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        struct ath_common *common = ath9k_hw_common(ah);
        bool prev_idle;
 
+       cancel_work_sync(&sc->chanctx_work);
        mutex_lock(&sc->mutex);
 
        ath_cancel_work(sc);
@@ -934,7 +871,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        }
 
        if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
+               ah->curchan = ath9k_cmn_get_channel(hw, ah,
+                                                   &sc->cur_chan->chandef);
 
        ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        ath9k_hw_phy_disable(ah);
@@ -979,18 +917,29 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
                iter_data->has_hw_macaddr = true;
        }
 
+       if (!vif->bss_conf.use_short_slot)
+               iter_data->slottime = ATH9K_SLOT_TIME_20;
+
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
                iter_data->naps++;
+               if (vif->bss_conf.enable_beacon)
+                       iter_data->beacons = true;
                break;
        case NL80211_IFTYPE_STATION:
                iter_data->nstations++;
+               if (vif->bss_conf.assoc && !iter_data->primary_sta)
+                       iter_data->primary_sta = vif;
                break;
        case NL80211_IFTYPE_ADHOC:
                iter_data->nadhocs++;
+               if (vif->bss_conf.enable_beacon)
+                       iter_data->beacons = true;
                break;
        case NL80211_IFTYPE_MESH_POINT:
                iter_data->nmeshes++;
+               if (vif->bss_conf.enable_beacon)
+                       iter_data->beacons = true;
                break;
        case NL80211_IFTYPE_WDS:
                iter_data->nwds++;
@@ -1000,26 +949,12 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        }
 }
 
-static void ath9k_sta_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_softc *sc = data;
-       struct ath_vif *avp = (void *)vif->drv_priv;
-
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return;
-
-       if (avp->primary_sta_vif)
-               ath9k_set_assoc_state(sc, vif);
-}
-
 /* Called with sc->mutex held. */
-void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
-                              struct ieee80211_vif *vif,
+void ath9k_calculate_iter_data(struct ath_softc *sc,
+                              struct ath_chanctx *ctx,
                               struct ath9k_vif_iter_data *iter_data)
 {
-       struct ath_softc *sc = hw->priv;
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath_vif *avp;
 
        /*
         * Pick the MAC address of the first interface as the new hardware
@@ -1028,29 +963,80 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
         */
        memset(iter_data, 0, sizeof(*iter_data));
        memset(&iter_data->mask, 0xff, ETH_ALEN);
+       iter_data->slottime = ATH9K_SLOT_TIME_9;
+
+       list_for_each_entry(avp, &ctx->vifs, list)
+               ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif);
+
+       if (ctx == &sc->offchannel.chan) {
+               struct ieee80211_vif *vif;
+
+               if (sc->offchannel.state < ATH_OFFCHANNEL_ROC_START)
+                       vif = sc->offchannel.scan_vif;
+               else
+                       vif = sc->offchannel.roc_vif;
+
+               if (vif)
+                       ath9k_vif_iter(iter_data, vif->addr, vif);
+               iter_data->beacons = false;
+       }
+}
+
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+                                 struct ieee80211_vif *vif, bool changed)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       unsigned long flags;
 
-       if (vif)
-               ath9k_vif_iter(iter_data, vif->addr, vif);
+       set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+       /* Set the AID, BSSID and do beacon-sync only when
+        * the HW opmode is STATION.
+        *
+        * But the primary bit is set above in any case.
+        */
+       if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
+               return;
+
+       ether_addr_copy(common->curbssid, bss_conf->bssid);
+       common->curaid = bss_conf->aid;
+       ath9k_hw_write_associd(sc->sc_ah);
 
-       /* Get list of all active MAC addresses */
-       ieee80211_iterate_active_interfaces_atomic(
-               sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-               ath9k_vif_iter, iter_data);
+       if (changed) {
+               common->last_rssi = ATH_RSSI_DUMMY_MARKER;
+               sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
-       memcpy(common->macaddr, iter_data->hw_macaddr, ETH_ALEN);
+               spin_lock_irqsave(&sc->sc_pm_lock, flags);
+               sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+               spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+       }
+
+       if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+               ath9k_mci_update_wlan_channels(sc, false);
+
+       ath_dbg(common, CONFIG,
+               "Primary Station interface: %pM, BSSID: %pM\n",
+               vif->addr, common->curbssid);
 }
 
 /* Called with sc->mutex held. */
-static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
-                                         struct ieee80211_vif *vif)
+void ath9k_calculate_summary_state(struct ath_softc *sc,
+                                  struct ath_chanctx *ctx)
 {
-       struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_vif_iter_data iter_data;
-       enum nl80211_iftype old_opmode = ah->opmode;
 
-       ath9k_calculate_iter_data(hw, vif, &iter_data);
+       ath_chanctx_check_active(sc, ctx);
+
+       if (ctx != sc->cur_chan)
+               return;
+
+       ath9k_ps_wakeup(sc);
+       ath9k_calculate_iter_data(sc, ctx, &iter_data);
+
+       if (iter_data.has_hw_macaddr)
+               ether_addr_copy(common->macaddr, iter_data.hw_macaddr);
 
        memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
        ath_hw_setbssidmask(common);
@@ -1073,24 +1059,57 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
 
        ath9k_hw_setopmode(ah);
 
+       ctx->switch_after_beacon = false;
        if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
                ah->imask |= ATH9K_INT_TSFOOR;
-       else
+       else {
                ah->imask &= ~ATH9K_INT_TSFOOR;
+               if (iter_data.naps == 1 && iter_data.beacons)
+                       ctx->switch_after_beacon = true;
+       }
+
+       ah->imask &= ~ATH9K_INT_SWBA;
+       if (ah->opmode == NL80211_IFTYPE_STATION) {
+               bool changed = (iter_data.primary_sta != ctx->primary_sta);
 
+               iter_data.beacons = true;
+               if (iter_data.primary_sta) {
+                       ath9k_set_assoc_state(sc, iter_data.primary_sta,
+                                             changed);
+                       if (!ctx->primary_sta ||
+                           !ctx->primary_sta->bss_conf.assoc)
+                               ctx->primary_sta = iter_data.primary_sta;
+               } else {
+                       ctx->primary_sta = NULL;
+                       memset(common->curbssid, 0, ETH_ALEN);
+                       common->curaid = 0;
+                       ath9k_hw_write_associd(sc->sc_ah);
+                       if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+                               ath9k_mci_update_wlan_channels(sc, true);
+               }
+       } else if (iter_data.beacons) {
+               ah->imask |= ATH9K_INT_SWBA;
+       }
        ath9k_hw_set_interrupts(ah);
 
-       /*
-        * If we are changing the opmode to STATION,
-        * a beacon sync needs to be done.
-        */
-       if (ah->opmode == NL80211_IFTYPE_STATION &&
-           old_opmode == NL80211_IFTYPE_AP &&
-           test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
-               ieee80211_iterate_active_interfaces_atomic(
-                       sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                       ath9k_sta_vif_iter, sc);
+       if (iter_data.beacons)
+               set_bit(ATH_OP_BEACONS, &common->op_flags);
+       else
+               clear_bit(ATH_OP_BEACONS, &common->op_flags);
+
+       if (ah->slottime != iter_data.slottime) {
+               ah->slottime = iter_data.slottime;
+               ath9k_hw_init_global_settings(ah);
        }
+
+       if (iter_data.primary_sta)
+               set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+       else
+               clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
+       ctx->primary_sta = iter_data.primary_sta;
+
+       ath9k_ps_restore(sc);
 }
 
 static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -1101,6 +1120,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_node *an = &avp->mcast_node;
+       int i;
 
        mutex_lock(&sc->mutex);
 
@@ -1115,14 +1135,20 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
        sc->nvifs++;
 
-       ath9k_ps_wakeup(sc);
-       ath9k_calculate_summary_state(hw, vif);
-       ath9k_ps_restore(sc);
-
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
        avp->vif = vif;
+       if (!ath9k_use_chanctx) {
+               avp->chanctx = sc->cur_chan;
+               list_add_tail(&avp->list, &avp->chanctx->vifs);
+       }
+       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+               vif->hw_queue[i] = i;
+       if (vif->type == NL80211_IFTYPE_AP)
+               vif->cab_queue = hw->queues - 2;
+       else
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 
        an->sc = sc;
        an->sta = NULL;
@@ -1141,6 +1167,8 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       int i;
 
        mutex_lock(&sc->mutex);
 
@@ -1157,13 +1185,19 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        vif->type = new_type;
        vif->p2p = p2p;
 
-       ath9k_ps_wakeup(sc);
-       ath9k_calculate_summary_state(hw, vif);
-       ath9k_ps_restore(sc);
-
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_assign_slot(sc, vif);
 
+       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+               vif->hw_queue[i] = i;
+
+       if (vif->type == NL80211_IFTYPE_AP)
+               vif->cab_queue = hw->queues - 2;
+       else
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+       ath9k_calculate_summary_state(sc, avp->chanctx);
+
        mutex_unlock(&sc->mutex);
        return 0;
 }
@@ -1211,14 +1245,12 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 
        sc->nvifs--;
        sc->tx99_vif = NULL;
+       if (!ath9k_use_chanctx)
+               list_del(&avp->list);
 
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
-       ath9k_ps_wakeup(sc);
-       ath9k_calculate_summary_state(hw, NULL);
-       ath9k_ps_restore(sc);
-
        ath_tx_node_cleanup(sc, &avp->mcast_node);
 
        mutex_unlock(&sc->mutex);
@@ -1345,7 +1377,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
-       bool reset_channel = false;
+       struct ath_chanctx *ctx = sc->cur_chan;
 
        ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
@@ -1361,7 +1393,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                         * The chip needs a reset to properly wake up from
                         * full sleep
                         */
-                       reset_channel = ah->chip_fullsleep;
+                       ath_chanctx_set_channel(sc, ctx, &ctx->chandef);
                }
        }
 
@@ -1391,20 +1423,16 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
-               if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
-                       ath_err(common, "Unable to set channel\n");
-                       mutex_unlock(&sc->mutex);
-                       ath9k_ps_restore(sc);
-                       return -EINVAL;
-               }
+       if (!ath9k_use_chanctx && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+               ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL);
+               ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
        }
 
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
-               sc->config.txpowlimit = 2 * conf->power_level;
+               sc->cur_chan->txpower = 2 * conf->power_level;
                ath9k_cmn_update_txpow(ah, sc->curtxpow,
-                                      sc->config.txpowlimit, &sc->curtxpow);
+                                      sc->cur_chan->txpower, &sc->curtxpow);
        }
 
        mutex_unlock(&sc->mutex);
@@ -1659,58 +1687,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
        return ret;
 }
 
-static void ath9k_set_assoc_state(struct ath_softc *sc,
-                                 struct ieee80211_vif *vif)
-{
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_vif *avp = (void *)vif->drv_priv;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       unsigned long flags;
-
-       set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
-       avp->primary_sta_vif = true;
-
-       /*
-        * Set the AID, BSSID and do beacon-sync only when
-        * the HW opmode is STATION.
-        *
-        * But the primary bit is set above in any case.
-        */
-       if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
-               return;
-
-       memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
-       common->curaid = bss_conf->aid;
-       ath9k_hw_write_associd(sc->sc_ah);
-
-       common->last_rssi = ATH_RSSI_DUMMY_MARKER;
-       sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-
-       spin_lock_irqsave(&sc->sc_pm_lock, flags);
-       sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
-       spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
-
-       if (ath9k_hw_mci_is_enabled(sc->sc_ah))
-               ath9k_mci_update_wlan_channels(sc, false);
-
-       ath_dbg(common, CONFIG,
-               "Primary Station interface: %pM, BSSID: %pM\n",
-               vif->addr, common->curbssid);
-}
-
-static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath_softc *sc = data;
-       struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
-       if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
-               return;
-
-       if (bss_conf->assoc)
-               ath9k_set_assoc_state(sc, vif);
-}
-
 void ath9k_p2p_ps_timer(void *priv)
 {
        struct ath_softc *sc = priv;
@@ -1720,7 +1696,11 @@ void ath9k_p2p_ps_timer(void *priv)
        struct ath_node *an;
        u32 tsf;
 
-       if (!avp)
+       del_timer_sync(&sc->sched.timer);
+       ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer);
+       ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER);
+
+       if (!avp || avp->chanctx != sc->cur_chan)
                return;
 
        tsf = ath9k_hw_gettsf32(sc->sc_ah);
@@ -1795,26 +1775,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
                        bss_conf->bssid, bss_conf->assoc);
 
-               if (avp->primary_sta_vif && !bss_conf->assoc) {
-                       clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
-                       avp->primary_sta_vif = false;
-
-                       if (ah->opmode == NL80211_IFTYPE_STATION)
-                               clear_bit(ATH_OP_BEACONS, &common->op_flags);
-               }
-
-               ieee80211_iterate_active_interfaces_atomic(
-                       sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-                       ath9k_bss_assoc_iter, sc);
-
-               if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
-                   ah->opmode == NL80211_IFTYPE_STATION) {
-                       memset(common->curbssid, 0, ETH_ALEN);
-                       common->curaid = 0;
-                       ath9k_hw_write_associd(sc->sc_ah);
-                       if (ath9k_hw_mci_is_enabled(sc->sc_ah))
-                               ath9k_mci_update_wlan_channels(sc, true);
-               }
+               ath9k_calculate_summary_state(sc, avp->chanctx);
+               if (bss_conf->assoc)
+                       ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_ASSOC);
        }
 
        if (changed & BSS_CHANGED_IBSS) {
@@ -1824,10 +1787,15 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
        }
 
        if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
-           (changed & BSS_CHANGED_BEACON_INT))
+           (changed & BSS_CHANGED_BEACON_INT) ||
+           (changed & BSS_CHANGED_BEACON_INFO)) {
+               if (changed & BSS_CHANGED_BEACON_ENABLED)
+                       ath9k_calculate_summary_state(sc, avp->chanctx);
                ath9k_beacon_config(sc, vif, changed);
+       }
 
-       if (changed & BSS_CHANGED_ERP_SLOT) {
+       if ((avp->chanctx == sc->cur_chan) &&
+           (changed & BSS_CHANGED_ERP_SLOT)) {
                if (bss_conf->use_short_slot)
                        slottime = 9;
                else
@@ -2030,25 +1998,32 @@ static bool ath9k_has_tx_pending(struct ath_softc *sc)
 
 static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        u32 queues, bool drop)
+{
+       struct ath_softc *sc = hw->priv;
+
+       mutex_lock(&sc->mutex);
+       __ath9k_flush(hw, queues, drop);
+       mutex_unlock(&sc->mutex);
+}
+
+void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        int timeout = HZ / 5; /* 200 ms */
        bool drain_txq;
+       int i;
 
-       mutex_lock(&sc->mutex);
        cancel_delayed_work_sync(&sc->tx_complete_work);
 
        if (ah->ah_flags & AH_UNPLUGGED) {
                ath_dbg(common, ANY, "Device has been unplugged!\n");
-               mutex_unlock(&sc->mutex);
                return;
        }
 
        if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
-               mutex_unlock(&sc->mutex);
                return;
        }
 
@@ -2066,11 +2041,13 @@ static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        ath_reset(sc);
 
                ath9k_ps_restore(sc);
-               ieee80211_wake_queues(hw);
+               for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+                       ieee80211_wake_queue(sc->hw,
+                                            sc->cur_chan->hw_queue_base + i);
+               }
        }
 
        ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
-       mutex_unlock(&sc->mutex);
 }
 
 static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
@@ -2230,6 +2207,403 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
        clear_bit(ATH_OP_SCANNING, &common->op_flags);
 }
 
+static int ath_scan_channel_duration(struct ath_softc *sc,
+                                    struct ieee80211_channel *chan)
+{
+       struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+
+       if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR))
+               return (HZ / 9); /* ~110 ms */
+
+       return (HZ / 16); /* ~60 ms */
+}
+
+static void
+ath_scan_next_channel(struct ath_softc *sc)
+{
+       struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+       struct ieee80211_channel *chan;
+
+       if (sc->offchannel.scan_idx >= req->n_channels) {
+               sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+               ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
+                                  NULL);
+               return;
+       }
+
+       chan = req->channels[sc->offchannel.scan_idx++];
+       sc->offchannel.duration = ath_scan_channel_duration(sc, chan);
+       sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND;
+       ath_chanctx_offchan_switch(sc, chan);
+}
+
+static void ath_offchannel_next(struct ath_softc *sc)
+{
+       struct ieee80211_vif *vif;
+
+       if (sc->offchannel.scan_req) {
+               vif = sc->offchannel.scan_vif;
+               sc->offchannel.chan.txpower = vif->bss_conf.txpower;
+               ath_scan_next_channel(sc);
+       } else if (sc->offchannel.roc_vif) {
+               vif = sc->offchannel.roc_vif;
+               sc->offchannel.chan.txpower = vif->bss_conf.txpower;
+               sc->offchannel.duration = sc->offchannel.roc_duration;
+               sc->offchannel.state = ATH_OFFCHANNEL_ROC_START;
+               ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan);
+       } else {
+               ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false),
+                                  NULL);
+               sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+               if (sc->ps_idle)
+                       ath_cancel_work(sc);
+       }
+}
+
+static void ath_roc_complete(struct ath_softc *sc, bool abort)
+{
+       sc->offchannel.roc_vif = NULL;
+       sc->offchannel.roc_chan = NULL;
+       if (!abort)
+               ieee80211_remain_on_channel_expired(sc->hw);
+       ath_offchannel_next(sc);
+       ath9k_ps_restore(sc);
+}
+
+static void ath_scan_complete(struct ath_softc *sc, bool abort)
+{
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+       sc->offchannel.scan_req = NULL;
+       sc->offchannel.scan_vif = NULL;
+       sc->offchannel.state = ATH_OFFCHANNEL_IDLE;
+       ieee80211_scan_completed(sc->hw, abort);
+       clear_bit(ATH_OP_SCANNING, &common->op_flags);
+       ath_offchannel_next(sc);
+       ath9k_ps_restore(sc);
+}
+
+static void ath_scan_send_probe(struct ath_softc *sc,
+                               struct cfg80211_ssid *ssid)
+{
+       struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+       struct ieee80211_vif *vif = sc->offchannel.scan_vif;
+       struct ath_tx_control txctl = {};
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info;
+       int band = sc->offchannel.chan.chandef.chan->band;
+
+       skb = ieee80211_probereq_get(sc->hw, vif,
+                       ssid->ssid, ssid->ssid_len, req->ie_len);
+       if (!skb)
+               return;
+
+       info = IEEE80211_SKB_CB(skb);
+       if (req->no_cck)
+               info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
+       if (req->ie_len)
+               memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
+
+       skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+
+       if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL))
+               goto error;
+
+       txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+       txctl.force_channel = true;
+       if (ath_tx_start(sc->hw, skb, &txctl))
+               goto error;
+
+       return;
+
+error:
+       ieee80211_free_txskb(sc->hw, skb);
+}
+
+static void ath_scan_channel_start(struct ath_softc *sc)
+{
+       struct cfg80211_scan_request *req = sc->offchannel.scan_req;
+       int i;
+
+       if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) &&
+           req->n_ssids) {
+               for (i = 0; i < req->n_ssids; i++)
+                       ath_scan_send_probe(sc, &req->ssids[i]);
+
+       }
+
+       sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT;
+       mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration);
+}
+
+void ath_offchannel_channel_change(struct ath_softc *sc)
+{
+       switch (sc->offchannel.state) {
+       case ATH_OFFCHANNEL_PROBE_SEND:
+               if (!sc->offchannel.scan_req)
+                       return;
+
+               if (sc->cur_chan->chandef.chan !=
+                   sc->offchannel.chan.chandef.chan)
+                       return;
+
+               ath_scan_channel_start(sc);
+               break;
+       case ATH_OFFCHANNEL_IDLE:
+               if (!sc->offchannel.scan_req)
+                       return;
+
+               ath_scan_complete(sc, false);
+               break;
+       case ATH_OFFCHANNEL_ROC_START:
+               if (sc->cur_chan != &sc->offchannel.chan)
+                       break;
+
+               sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT;
+               mod_timer(&sc->offchannel.timer, jiffies +
+                         msecs_to_jiffies(sc->offchannel.duration));
+               ieee80211_ready_on_channel(sc->hw);
+               break;
+       case ATH_OFFCHANNEL_ROC_DONE:
+               ath_roc_complete(sc, false);
+               break;
+       default:
+               break;
+       }
+}
+
+void ath_offchannel_timer(unsigned long data)
+{
+       struct ath_softc *sc = (struct ath_softc *)data;
+       struct ath_chanctx *ctx;
+
+       switch (sc->offchannel.state) {
+       case ATH_OFFCHANNEL_PROBE_WAIT:
+               if (!sc->offchannel.scan_req)
+                       return;
+
+               /* get first active channel context */
+               ctx = ath_chanctx_get_oper_chan(sc, true);
+               if (ctx->active) {
+                       sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND;
+                       ath_chanctx_switch(sc, ctx, NULL);
+                       mod_timer(&sc->offchannel.timer, jiffies + HZ / 10);
+                       break;
+               }
+               /* fall through */
+       case ATH_OFFCHANNEL_SUSPEND:
+               if (!sc->offchannel.scan_req)
+                       return;
+
+               ath_scan_next_channel(sc);
+               break;
+       case ATH_OFFCHANNEL_ROC_START:
+       case ATH_OFFCHANNEL_ROC_WAIT:
+               ctx = ath_chanctx_get_oper_chan(sc, false);
+               sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
+               ath_chanctx_switch(sc, ctx, NULL);
+               break;
+       default:
+               break;
+       }
+}
+
+static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        struct ieee80211_scan_request *hw_req)
+{
+       struct cfg80211_scan_request *req = &hw_req->req;
+       struct ath_softc *sc = hw->priv;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       int ret = 0;
+
+       mutex_lock(&sc->mutex);
+
+       if (WARN_ON(sc->offchannel.scan_req)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ath9k_ps_wakeup(sc);
+       set_bit(ATH_OP_SCANNING, &common->op_flags);
+       sc->offchannel.scan_vif = vif;
+       sc->offchannel.scan_req = req;
+       sc->offchannel.scan_idx = 0;
+
+       if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE)
+               ath_offchannel_next(sc);
+
+out:
+       mutex_unlock(&sc->mutex);
+
+       return ret;
+}
+
+static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = hw->priv;
+
+       mutex_lock(&sc->mutex);
+       del_timer_sync(&sc->offchannel.timer);
+       ath_scan_complete(sc, true);
+       mutex_unlock(&sc->mutex);
+}
+
+static int ath9k_remain_on_channel(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_channel *chan, int duration,
+                                  enum ieee80211_roc_type type)
+{
+       struct ath_softc *sc = hw->priv;
+       int ret = 0;
+
+       mutex_lock(&sc->mutex);
+
+       if (WARN_ON(sc->offchannel.roc_vif)) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       ath9k_ps_wakeup(sc);
+       sc->offchannel.roc_vif = vif;
+       sc->offchannel.roc_chan = chan;
+       sc->offchannel.roc_duration = duration;
+
+       if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE)
+               ath_offchannel_next(sc);
+
+out:
+       mutex_unlock(&sc->mutex);
+
+       return ret;
+}
+
+static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct ath_softc *sc = hw->priv;
+
+       mutex_lock(&sc->mutex);
+
+       del_timer_sync(&sc->offchannel.timer);
+
+       if (sc->offchannel.roc_vif) {
+               if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
+                       ath_roc_complete(sc, true);
+       }
+
+       mutex_unlock(&sc->mutex);
+
+       return 0;
+}
+
+static int ath9k_add_chanctx(struct ieee80211_hw *hw,
+                            struct ieee80211_chanctx_conf *conf)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_chanctx *ctx, **ptr;
+       int pos;
+
+       mutex_lock(&sc->mutex);
+
+       ath_for_each_chanctx(sc, ctx) {
+               if (ctx->assigned)
+                       continue;
+
+               ptr = (void *) conf->drv_priv;
+               *ptr = ctx;
+               ctx->assigned = true;
+               pos = ctx - &sc->chanctx[0];
+               ctx->hw_queue_base = pos * IEEE80211_NUM_ACS;
+               ath_chanctx_set_channel(sc, ctx, &conf->def);
+               mutex_unlock(&sc->mutex);
+               return 0;
+       }
+       mutex_unlock(&sc->mutex);
+       return -ENOSPC;
+}
+
+
+static void ath9k_remove_chanctx(struct ieee80211_hw *hw,
+                                struct ieee80211_chanctx_conf *conf)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_chanctx *ctx = ath_chanctx_get(conf);
+
+       mutex_lock(&sc->mutex);
+       ctx->assigned = false;
+       ctx->hw_queue_base = -1;
+       ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN);
+       mutex_unlock(&sc->mutex);
+}
+
+static void ath9k_change_chanctx(struct ieee80211_hw *hw,
+                                struct ieee80211_chanctx_conf *conf,
+                                u32 changed)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_chanctx *ctx = ath_chanctx_get(conf);
+
+       mutex_lock(&sc->mutex);
+       ath_chanctx_set_channel(sc, ctx, &conf->def);
+       mutex_unlock(&sc->mutex);
+}
+
+static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_chanctx_conf *conf)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       struct ath_chanctx *ctx = ath_chanctx_get(conf);
+       int i;
+
+       mutex_lock(&sc->mutex);
+       avp->chanctx = ctx;
+       list_add_tail(&avp->list, &ctx->vifs);
+       ath9k_calculate_summary_state(sc, ctx);
+       for (i = 0; i < IEEE80211_NUM_ACS; i++)
+               vif->hw_queue[i] = ctx->hw_queue_base + i;
+       mutex_unlock(&sc->mutex);
+
+       return 0;
+}
+
+static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      struct ieee80211_chanctx_conf *conf)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_vif *avp = (void *)vif->drv_priv;
+       struct ath_chanctx *ctx = ath_chanctx_get(conf);
+       int ac;
+
+       mutex_lock(&sc->mutex);
+       avp->chanctx = NULL;
+       list_del(&avp->list);
+       ath9k_calculate_summary_state(sc, ctx);
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+               vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+       mutex_unlock(&sc->mutex);
+}
+
+void ath9k_fill_chanctx_ops(void)
+{
+       if (!ath9k_use_chanctx)
+               return;
+
+       ath9k_ops.hw_scan = ath9k_hw_scan;
+       ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
+       ath9k_ops.remain_on_channel  = ath9k_remain_on_channel;
+       ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
+       ath9k_ops.add_chanctx        = ath9k_add_chanctx;
+       ath9k_ops.remove_chanctx     = ath9k_remove_chanctx;
+       ath9k_ops.change_chanctx     = ath9k_change_chanctx;
+       ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
+       ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
+       ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
+}
+
 struct ieee80211_ops ath9k_ops = {
        .tx                 = ath9k_tx,
        .start              = ath9k_start,
index a0dbcc4123840cbeab93e56939ce6b095f3e885d..3f7a11edb82a77dedcfdc5f38fa4c6c832e9579e 100644 (file)
@@ -706,7 +706,7 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
                return;
 
        if (setchannel) {
-               struct ath9k_hw_cal_data *caldata = &sc->caldata;
+               struct ath9k_hw_cal_data *caldata = &sc->cur_chan->caldata;
                if (IS_CHAN_HT40PLUS(ah->curchan) &&
                    (ah->curchan->channel > caldata->channel) &&
                    (ah->curchan->channel <= caldata->channel + 20))
@@ -720,7 +720,7 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
                mci_hw->concur_tx = concur_tx;
 
        if (old_concur_tx != mci_hw->concur_tx)
-               ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+               ath9k_hw_set_txpowerlimit(ah, sc->cur_chan->txpower, false);
 }
 
 static void ath9k_mci_stomp_audio(struct ath_softc *sc)
index 4dec09e565ed865470ff6bc5738e86c21d523906..7a2b2c5caced25aa7919dad9528fd4bf4a64717b 100644 (file)
@@ -843,6 +843,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENODEV;
        }
 
+       ath9k_fill_chanctx_ops();
        hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
        if (!hw) {
                dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
index 9105a92364f78241fa565b661a017ce59f7400f5..74ab1d02013bf002bf04c0cfd197a2ecd8cdc29a 100644 (file)
@@ -259,7 +259,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
        ath_opmode_init(sc);
-       ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
+       ath9k_hw_startpcureceive(sc->sc_ah, sc->cur_chan->offchannel);
 }
 
 static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -374,6 +374,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
 
 u32 ath_calcrxfilter(struct ath_softc *sc)
 {
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u32 rfilt;
 
        if (config_enabled(CONFIG_ATH9K_TX99))
@@ -424,6 +425,10 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
        if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
                rfilt |= ATH9K_RX_FILTER_4ADDRESS;
 
+       if (ath9k_use_chanctx &&
+           test_bit(ATH_OP_SCANNING, &common->op_flags))
+               rfilt |= ATH9K_RX_FILTER_BEACON;
+
        return rfilt;
 
 }
@@ -457,7 +462,7 @@ int ath_startrecv(struct ath_softc *sc)
 
 start_recv:
        ath_opmode_init(sc);
-       ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
+       ath9k_hw_startpcureceive(ah, sc->cur_chan->offchannel);
 
        return 0;
 }
@@ -540,7 +545,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
                sc->ps_flags &= ~PS_BEACON_SYNC;
                ath_dbg(common, PS,
                        "Reconfigure beacon timers based on synchronized timestamp\n");
-               if (!(WARN_ON_ONCE(sc->cur_beacon_conf.beacon_interval == 0)))
+               if (!(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0)))
                        ath9k_set_beacon(sc);
                if (sc->p2p_ps_vif)
                        ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
@@ -887,6 +892,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
                return -EINVAL;
        }
 
+       if (rx_stats->is_mybeacon) {
+               sc->sched.next_tbtt = rx_stats->rs_tstamp;
+               ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_BEACON_RECEIVED);
+       }
+
        ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
 
        rx_status->band = ah->curchan->chan->band;
index f1bbce3f7774ee16a9e29b8bb62f72ade1a9d99f..a1499700bcf26d243fd159f0d6d918b459c8239f 100644 (file)
 #define AR_SREV_VERSION_9531            0x500
 #define AR_SREV_REVISION_9531_10        0
 #define AR_SREV_REVISION_9531_11        1
+#define AR_SREV_REVISION_9531_20        2
 
 #define AR_SREV_5416(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
 #define AR_SREV_9531_11(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
         ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_11))
+#define AR_SREV_9531_20(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9531) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9531_20))
 
 /* NOTE: When adding chips newer than Peacock, add chip check here */
 #define AR_SREV_9580_10_OR_LATER(_ah) \
index a65cfb91adcae12ea2d196c8e7c0f8658316f6ef..23972924c774becd09f633f0708159594bc1aae1 100644 (file)
@@ -76,7 +76,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
        tx_info = IEEE80211_SKB_CB(skb);
        memset(tx_info, 0, sizeof(*tx_info));
        rate = &tx_info->control.rates[0];
-       tx_info->band = hw->conf.chandef.chan->band;
+       tx_info->band = sc->cur_chan->chandef.chan->band;
        tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
        tx_info->control.vif = sc->tx99_vif;
        rate->count = 1;
index 2879887f56912dea38e2568ddb748f25790e7bb0..a4f4f0da81f6e2a1cc71349a0c68185a2f07e546 100644 (file)
@@ -193,6 +193,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
        u32 wow_triggers_enabled = 0;
        int ret = 0;
 
+       cancel_work_sync(&sc->chanctx_work);
        mutex_lock(&sc->mutex);
 
        ath_cancel_work(sc);
index 66acb2cbd9df3cc45c307bb6b38118436da3d01a..d4927c9a6bae9a7cf86315bfbd7e8a1374260ca3 100644 (file)
@@ -103,9 +103,16 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
                ieee80211_tx_status(sc->hw, skb);
 }
 
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+                            struct ath_atx_tid *tid)
 {
        struct ath_atx_ac *ac = tid->ac;
+       struct list_head *list;
+       struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+       struct ath_chanctx *ctx = avp->chanctx;
+
+       if (!ctx)
+               return;
 
        if (tid->sched)
                return;
@@ -117,7 +124,9 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
                return;
 
        ac->sched = true;
-       list_add_tail(&ac->list, &txq->axq_acq);
+
+       list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+       list_add_tail(&ac->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -147,7 +156,8 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
 static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
                             struct sk_buff *skb)
 {
-       int q;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int q, hw_queue;
 
        q = skb_get_queue_mapping(skb);
        if (txq == sc->tx.uapsdq)
@@ -159,9 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
        if (WARN_ON(--txq->pending_frames < 0))
                txq->pending_frames = 0;
 
+       hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
        if (txq->stopped &&
            txq->pending_frames < sc->tx.txq_max_pending[q]) {
-               ieee80211_wake_queue(sc->hw, q);
+               ieee80211_wake_queue(sc->hw, hw_queue);
                txq->stopped = false;
        }
 }
@@ -626,7 +637,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                skb_queue_splice_tail(&bf_pending, &tid->retry_q);
                if (!an->sleeping) {
-                       ath_tx_queue_tid(txq, tid);
+                       ath_tx_queue_tid(sc, txq, tid);
 
                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->ac->clear_ps_filter = true;
@@ -1483,7 +1494,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
                ac->clear_ps_filter = true;
 
                if (ath_tid_has_buffered(tid)) {
-                       ath_tx_queue_tid(txq, tid);
+                       ath_tx_queue_tid(sc, txq, tid);
                        ath_txq_schedule(sc, txq);
                }
 
@@ -1507,7 +1518,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
        tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
 
        if (ath_tid_has_buffered(tid)) {
-               ath_tx_queue_tid(txq, tid);
+               ath_tx_queue_tid(sc, txq, tid);
                ath_txq_schedule(sc, txq);
        }
 
@@ -1642,7 +1653,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                txq->axq_link = NULL;
                __skb_queue_head_init(&txq->complete_q);
                INIT_LIST_HEAD(&txq->axq_q);
-               INIT_LIST_HEAD(&txq->axq_acq);
                spin_lock_init(&txq->axq_lock);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
@@ -1686,7 +1696,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
        struct ath9k_tx_queue_info qi;
-       struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
        int qnum = sc->beacon.cabq->axq_qnum;
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1804,7 +1814,7 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
  * for transmit until ampdu_depth has reached min Q depth.
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
@@ -1812,19 +1822,31 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_atx_ac *ac, *last_ac;
        struct ath_atx_tid *tid, *last_tid;
+       struct list_head *ac_list;
        bool sent = false;
 
+       if (txq->mac80211_qnum < 0)
+               return;
+
+       spin_lock_bh(&sc->chan_lock);
+       ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+       spin_unlock_bh(&sc->chan_lock);
+
        if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
-           list_empty(&txq->axq_acq))
+           list_empty(ac_list))
                return;
 
+       spin_lock_bh(&sc->chan_lock);
        rcu_read_lock();
 
-       last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
-       while (!list_empty(&txq->axq_acq)) {
+       last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+       while (!list_empty(ac_list)) {
                bool stop = false;
 
-               ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+               if (sc->cur_chan->stopped)
+                       break;
+
+               ac = list_first_entry(ac_list, struct ath_atx_ac, list);
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = false;
@@ -1844,7 +1866,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                         * are pending for the tid
                         */
                        if (ath_tid_has_buffered(tid))
-                               ath_tx_queue_tid(txq, tid);
+                               ath_tx_queue_tid(sc, txq, tid);
 
                        if (stop || tid == last_tid)
                                break;
@@ -1852,7 +1874,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 
                if (!list_empty(&ac->tid_q) && !ac->sched) {
                        ac->sched = true;
-                       list_add_tail(&ac->list, &txq->axq_acq);
+                       list_add_tail(&ac->list, ac_list);
                }
 
                if (stop)
@@ -1863,12 +1885,27 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                                break;
 
                        sent = false;
-                       last_ac = list_entry(txq->axq_acq.prev,
+                       last_ac = list_entry(ac_list->prev,
                                             struct ath_atx_ac, list);
                }
        }
 
        rcu_read_unlock();
+       spin_unlock_bh(&sc->chan_lock);
+}
+
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+       struct ath_txq *txq;
+       int i;
+
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               txq = sc->tx.txq_map[i];
+
+               spin_lock_bh(&txq->axq_lock);
+               ath_txq_schedule(sc, txq);
+               spin_unlock_bh(&txq->axq_lock);
+       }
 }
 
 /***********/
@@ -2150,13 +2187,21 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
+       struct ath_vif *avp = NULL;
        struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        struct ath_atx_tid *tid = NULL;
        struct ath_buf *bf;
-       int q;
+       bool queue;
+       int q, hw_queue;
        int ret;
 
+       if (vif)
+               avp = (void *)vif->drv_priv;
+
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+               txctl->force_channel = true;
+
        ret = ath_tx_prepare(hw, skb, txctl);
        if (ret)
            return ret;
@@ -2168,24 +2213,39 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
         */
 
        q = skb_get_queue_mapping(skb);
+       hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
 
        ath_txq_lock(sc, txq);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
            !txq->stopped) {
-               ieee80211_stop_queue(sc->hw, q);
+               ieee80211_stop_queue(sc->hw, hw_queue);
                txq->stopped = true;
        }
 
-       if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+       queue = ieee80211_is_data_present(hdr->frame_control);
+
+       /* Force queueing of all frames that belong to a virtual interface on
+        * a different channel context, to ensure that they are sent on the
+        * correct channel.
+        */
+       if (((avp && avp->chanctx != sc->cur_chan) ||
+            sc->cur_chan->stopped) && !txctl->force_channel) {
+               if (!txctl->an)
+                       txctl->an = &avp->mcast_node;
+               info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE;
+               queue = true;
+       }
+
+       if (txctl->an && queue)
                tid = ath_get_skb_tid(sc, txctl->an, skb);
 
-       if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+       if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE |
+                          IEEE80211_TX_CTL_TX_OFFCHAN)) {
                ath_txq_unlock(sc, txq);
                txq = sc->tx.uapsdq;
                ath_txq_lock(sc, txq);
-       } else if (txctl->an &&
-                  ieee80211_is_data_present(hdr->frame_control)) {
+       } else if (txctl->an && queue) {
                WARN_ON(tid->ac->txq != txctl->txq);
 
                if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
@@ -2198,7 +2258,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                TX_STAT_INC(txq->axq_qnum, a_queued_sw);
                __skb_queue_tail(&tid->buf_q, skb);
                if (!txctl->an->sleeping)
-                       ath_tx_queue_tid(txq, tid);
+                       ath_tx_queue_tid(sc, txq, tid);
 
                ath_txq_schedule(sc, txq);
                goto out;
@@ -2244,8 +2304,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        int max_duration;
 
        max_duration =
-               sc->cur_beacon_conf.beacon_interval * 1000 *
-               sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
+               sc->cur_chan->beacon.beacon_interval * 1000 *
+               sc->cur_chan->beacon.dtim_period / ATH_BCBUF;
 
        do {
                struct ath_frame_info *fi = get_frame_info(skb);
@@ -2560,6 +2620,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                        sc->beacon.tx_processed = true;
                        sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
 
+                       ath_chanctx_event(sc, NULL,
+                                         ATH_CHANCTX_EVENT_BEACON_SENT);
                        ath9k_csa_update(sc);
                        continue;
                }
index ab4ee7d39ad385a0da2c92f80ef0ce5bade8f5b4..b80b2138ce3c745d93d9bc973ac83cbedc984aab 100644 (file)
@@ -1139,7 +1139,6 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
 
        default:
                return -EINVAL;
-               break;
        }
 
        for (; i >= 0; i--) {
index 4ab5370ab7a6ff5df3d5e4b1525b7f852cc55cb6..b71d2b33532de392fb68094309804bdd585d5c18 100644 (file)
@@ -488,7 +488,6 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
                ret = -EOPNOTSUPP;
                goto out;
-               break;
        }
 
 out:
index 820d4ebd93222b6e3a7b28dd68d1d590e2b97a7b..4ac2c208c9ba3ff109e8c8942b1b9637e9b7cf0d 100644 (file)
@@ -104,8 +104,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type)
        return -EOPNOTSUPP;
 }
 
-static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
-                             struct station_info *sinfo)
+int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+                      struct station_info *sinfo)
 {
        struct wmi_notify_req_cmd cmd = {
                .cid = cid,
@@ -287,6 +287,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                return -EBUSY;
        }
 
+       wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
        wil->scan_request = request;
        mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
 
@@ -443,15 +444,15 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
        return rc;
 }
 
-static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
-                               struct wireless_dev *wdev,
-                               struct cfg80211_mgmt_tx_params *params,
-                               u64 *cookie)
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        struct cfg80211_mgmt_tx_params *params,
+                        u64 *cookie)
 {
        const u8 *buf = params->buf;
        size_t len = params->len;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
+       bool tx_status = false;
        struct ieee80211_mgmt *mgmt_frame = (void *)buf;
        struct wmi_sw_tx_req_cmd *cmd;
        struct {
@@ -460,8 +461,10 @@ static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
        } __packed evt;
 
        cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto out;
+       }
 
        memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
        cmd->len = cpu_to_le16(len);
@@ -470,10 +473,12 @@ static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
        rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
                      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
        if (rc == 0)
-               rc = evt.evt.status;
+               tx_status = !evt.evt.status;
 
        kfree(cmd);
-
+ out:
+       cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
+                               tx_status, GFP_KERNEL);
        return rc;
 }
 
@@ -562,6 +567,34 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
        return rc;
 }
 
+static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
+{
+       print_hex_dump_bytes("head     ", DUMP_PREFIX_OFFSET,
+                            b->head, b->head_len);
+       print_hex_dump_bytes("tail     ", DUMP_PREFIX_OFFSET,
+                            b->tail, b->tail_len);
+       print_hex_dump_bytes("BCON IE  ", DUMP_PREFIX_OFFSET,
+                            b->beacon_ies, b->beacon_ies_len);
+       print_hex_dump_bytes("PROBE    ", DUMP_PREFIX_OFFSET,
+                            b->probe_resp, b->probe_resp_len);
+       print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET,
+                            b->proberesp_ies, b->proberesp_ies_len);
+       print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET,
+                            b->assocresp_ies, b->assocresp_ies_len);
+}
+
+static void wil_print_crypto(struct wil6210_priv *wil,
+                            struct cfg80211_crypto_settings *c)
+{
+       wil_dbg_misc(wil, "WPA versions: 0x%08x cipher group 0x%08x\n",
+                    c->wpa_versions, c->cipher_group);
+       wil_dbg_misc(wil, "Pairwise ciphers [%d]\n", c->n_ciphers_pairwise);
+       wil_dbg_misc(wil, "AKM suites [%d]\n", c->n_akm_suites);
+       wil_dbg_misc(wil, "Control port : %d, eth_type 0x%04x no_encrypt %d\n",
+                    c->control_port, be16_to_cpu(c->control_port_ethertype),
+                    c->control_port_no_encrypt);
+}
+
 static int wil_fix_bcon(struct wil6210_priv *wil,
                        struct cfg80211_beacon_data *bcon)
 {
@@ -595,8 +628,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        struct wireless_dev *wdev = ndev->ieee80211_ptr;
        struct ieee80211_channel *channel = info->chandef.chan;
        struct cfg80211_beacon_data *bcon = &info->beacon;
+       struct cfg80211_crypto_settings *crypto = &info->crypto;
        u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
 
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
        if (!channel) {
                wil_err(wil, "AP: No channel???\n");
                return -EINVAL;
@@ -604,11 +640,19 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
 
        wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
                     channel->center_freq, info->privacy ? "secure" : "open");
+       wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
+                    info->privacy, info->auth_type);
+       wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
+                    info->dtim_period);
        print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
                             info->ssid, info->ssid_len);
+       wil_print_bcon_data(bcon);
+       wil_print_crypto(wil, crypto);
 
-       if (wil_fix_bcon(wil, bcon))
+       if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
+               wil_print_bcon_data(bcon);
+       }
 
        mutex_lock(&wil->mutex);
 
@@ -663,6 +707,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
        int rc = 0;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+       wil_dbg_misc(wil, "%s()\n", __func__);
+
        mutex_lock(&wil->mutex);
 
        rc = wmi_pcp_stop(wil);
index 8d4bc4bfb6643a64609b2e06c3b303fa72fc27fa..a868c5eebe37d73a0c4e140473fb2f3e02169278 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/pci.h>
 #include <linux/rtnetlink.h>
+#include <linux/power_supply.h>
 
 #include "wil6210.h"
 #include "txrx.h"
@@ -69,14 +70,32 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
 
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
                struct vring *vring = &(wil->vring_tx[i]);
+               struct vring_tx_data *txdata = &wil->vring_tx_data[i];
+
                if (vring->va) {
                        int cid = wil->vring2cid_tid[i][0];
                        int tid = wil->vring2cid_tid[i][1];
+                       u32 swhead = vring->swhead;
+                       u32 swtail = vring->swtail;
+                       int used = (vring->size + swhead - swtail)
+                                  % vring->size;
+                       int avail = vring->size - used - 1;
                        char name[10];
+                       /* performance monitoring */
+                       cycles_t now = get_cycles();
+                       cycles_t idle = txdata->idle * 100;
+                       cycles_t total = now - txdata->begin;
+
+                       do_div(idle, total);
+                       txdata->begin = now;
+                       txdata->idle = 0ULL;
+
                        snprintf(name, sizeof(name), "tx_%2d", i);
 
-                       seq_printf(s, "\n%pM CID %d TID %d\n",
-                                  wil->sta[cid].addr, cid, tid);
+                       seq_printf(s, "\n%pM CID %d TID %d [%3d|%3d] idle %3d%%\n",
+                                  wil->sta[cid].addr, cid, tid, used, avail,
+                                  (int)idle);
+
                        wil_print_vring(s, wil, name, vring, '_', 'H');
                }
        }
@@ -231,6 +250,26 @@ static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
                                   &fops_iomem_x32);
 }
 
+static int wil_debugfs_ulong_set(void *data, u64 val)
+{
+       *(ulong *)data = val;
+       return 0;
+}
+static int wil_debugfs_ulong_get(void *data, u64 *val)
+{
+       *val = *(ulong *)data;
+       return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
+                       wil_debugfs_ulong_set, "%llu\n");
+
+static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
+                                              struct dentry *parent,
+                                              ulong *value)
+{
+       return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong);
+}
+
 static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
                                      const char *name,
                                      struct dentry *parent, u32 off)
@@ -284,11 +323,11 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
        if (IS_ERR_OR_NULL(d))
                return -ENODEV;
 
-       wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+       wil_debugfs_create_iomem_x32("TRSH", S_IRUGO | S_IWUSR, d, wil->csr +
                                     HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
-       wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+       wil_debugfs_create_iomem_x32("DATA", S_IRUGO | S_IWUSR, d, wil->csr +
                                     HOSTADDR(RGF_DMA_ITR_CNT_DATA));
-       wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+       wil_debugfs_create_iomem_x32("CTL", S_IRUGO | S_IWUSR, d, wil->csr +
                                     HOSTADDR(RGF_DMA_ITR_CNT_CRL));
 
        return 0;
@@ -397,6 +436,124 @@ static const struct file_operations fops_reset = {
        .write = wil_write_file_reset,
        .open  = simple_open,
 };
+/*---write channel 1..4 to rxon for it, 0 to rxoff---*/
+static ssize_t wil_write_file_rxon(struct file *file, const char __user *buf,
+                                  size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       int rc;
+       long channel;
+       bool on;
+
+       char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+       if (!kbuf)
+               return -ENOMEM;
+       if (copy_from_user(kbuf, buf, len))
+               return -EIO;
+
+       kbuf[len] = '\0';
+       rc = kstrtol(kbuf, 0, &channel);
+       kfree(kbuf);
+       if (rc)
+               return rc;
+
+       if ((channel < 0) || (channel > 4)) {
+               wil_err(wil, "Invalid channel %ld\n", channel);
+               return -EINVAL;
+       }
+       on = !!channel;
+
+       if (on) {
+               rc = wmi_set_channel(wil, (int)channel);
+               if (rc)
+                       return rc;
+       }
+
+       rc = wmi_rxon(wil, on);
+       if (rc)
+               return rc;
+
+       return len;
+}
+
+static const struct file_operations fops_rxon = {
+       .write = wil_write_file_rxon,
+       .open  = simple_open,
+};
+/*---tx_mgmt---*/
+/* Write mgmt frame to this file to send it */
+static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
+                                    size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       struct cfg80211_mgmt_tx_params params;
+       int rc;
+
+       void *frame = kmalloc(len, GFP_KERNEL);
+       if (!frame)
+               return -ENOMEM;
+
+       if (copy_from_user(frame, buf, len))
+               return -EIO;
+
+       params.buf = frame;
+       params.len = len;
+       params.chan = wdev->preset_chandef.chan;
+
+       rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
+
+       kfree(frame);
+       wil_info(wil, "%s() -> %d\n", __func__, rc);
+
+       return len;
+}
+
+static const struct file_operations fops_txmgmt = {
+       .write = wil_write_file_txmgmt,
+       .open  = simple_open,
+};
+
+/* Write WMI command (w/o mbox header) to this file to send it
+ * WMI starts from wil6210_mbox_hdr_wmi header
+ */
+static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
+                                 size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wil6210_mbox_hdr_wmi *wmi;
+       void *cmd;
+       int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+       u16 cmdid;
+       int rc, rc1;
+
+       if (cmdlen <= 0)
+               return -EINVAL;
+
+       wmi = kmalloc(len, GFP_KERNEL);
+       if (!wmi)
+               return -ENOMEM;
+
+       rc = simple_write_to_buffer(wmi, len, ppos, buf, len);
+       if (rc < 0)
+               return rc;
+
+       cmd = &wmi[1];
+       cmdid = le16_to_cpu(wmi->id);
+
+       rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
+       kfree(wmi);
+
+       wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+
+       return rc;
+}
+
+static const struct file_operations fops_wmi = {
+       .write = wil_write_file_wmi,
+       .open  = simple_open,
+};
 
 static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
                            const char *prefix)
@@ -600,8 +757,8 @@ static int wil_temp_debugfs_show(struct seq_file *s, void *data)
                return 0;
        }
 
-       print_temp(s, "MAC temperature   :", t_m);
-       print_temp(s, "Radio temperature :", t_r);
+       print_temp(s, "T_mac   =", t_m);
+       print_temp(s, "T_radio =", t_r);
 
        return 0;
 }
@@ -618,6 +775,130 @@ static const struct file_operations fops_temp = {
        .llseek         = seq_lseek,
 };
 
+/*---------freq------------*/
+static int wil_freq_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       u16 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0;
+
+       seq_printf(s, "Freq = %d\n", freq);
+
+       return 0;
+}
+
+static int wil_freq_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_freq_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_freq = {
+       .open           = wil_freq_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+/*---------link------------*/
+static int wil_link_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       struct station_info sinfo;
+       int i, rc;
+
+       for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+               struct wil_sta_info *p = &wil->sta[i];
+               char *status = "unknown";
+               switch (p->status) {
+               case wil_sta_unused:
+                       status = "unused   ";
+                       break;
+               case wil_sta_conn_pending:
+                       status = "pending  ";
+                       break;
+               case wil_sta_connected:
+                       status = "connected";
+                       break;
+               }
+               seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+                          (p->data_port_open ? " data_port_open" : ""));
+
+               if (p->status == wil_sta_connected) {
+                       rc = wil_cid_fill_sinfo(wil, i, &sinfo);
+                       if (rc)
+                               return rc;
+
+                       seq_printf(s, "  Tx_mcs = %d\n", sinfo.txrate.mcs);
+                       seq_printf(s, "  Rx_mcs = %d\n", sinfo.rxrate.mcs);
+                       seq_printf(s, "  SQ     = %d\n", sinfo.signal);
+               }
+       }
+
+       return 0;
+}
+
+static int wil_link_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_link_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_link = {
+       .open           = wil_link_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+/*---------info------------*/
+static int wil_info_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       struct net_device *ndev = wil_to_ndev(wil);
+       int is_ac = power_supply_is_system_supplied();
+       int rx = atomic_xchg(&wil->isr_count_rx, 0);
+       int tx = atomic_xchg(&wil->isr_count_tx, 0);
+       static ulong rxf_old, txf_old;
+       ulong rxf = ndev->stats.rx_packets;
+       ulong txf = ndev->stats.tx_packets;
+       unsigned int i;
+
+       /* >0 : AC; 0 : battery; <0 : error */
+       seq_printf(s, "AC powered : %d\n", is_ac);
+       seq_printf(s, "Rx irqs:packets : %8d : %8ld\n", rx, rxf - rxf_old);
+       seq_printf(s, "Tx irqs:packets : %8d : %8ld\n", tx, txf - txf_old);
+       rxf_old = rxf;
+       txf_old = txf;
+
+
+#define CHECK_QSTATE(x) (state & BIT(__QUEUE_STATE_ ## x)) ? \
+       " " __stringify(x) : ""
+
+       for (i = 0; i < ndev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);
+               unsigned long state = txq->state;
+
+               seq_printf(s, "Tx queue[%i] state : 0x%lx%s%s%s\n", i, state,
+                          CHECK_QSTATE(DRV_XOFF),
+                          CHECK_QSTATE(STACK_XOFF),
+                          CHECK_QSTATE(FROZEN)
+                         );
+       }
+#undef CHECK_QSTATE
+       return 0;
+}
+
+static int wil_info_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_info_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_info = {
+       .open           = wil_info_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
 /*---------Station matrix------------*/
 static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
 {
@@ -630,7 +911,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
                else
                        seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
        }
-       seq_puts(s, "]\n");
+       seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
 }
 
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
@@ -703,6 +984,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
        debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
        debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
                           &wil->secure_pcp);
+       wil_debugfs_create_ulong("status", S_IRUGO | S_IWUSR, dbg,
+                                &wil->status);
 
        wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
                                   HOSTADDR(RGF_USER_USER_ICR));
@@ -719,7 +1002,13 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
        debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
 
        debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+       debugfs_create_file("rxon", S_IWUSR, dbg, wil, &fops_rxon);
+       debugfs_create_file("tx_mgmt", S_IWUSR, dbg, wil, &fops_txmgmt);
+       debugfs_create_file("wmi_send", S_IWUSR, dbg, wil, &fops_wmi);
        debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
+       debugfs_create_file("freq", S_IRUGO, dbg, wil, &fops_freq);
+       debugfs_create_file("link", S_IRUGO, dbg, wil, &fops_link);
+       debugfs_create_file("info", S_IRUGO, dbg, wil, &fops_info);
 
        wil->rgf_blob.data = (void * __force)wil->csr + 0;
        wil->rgf_blob.size = 0xa000;
index 73593aa3cd9813e2bd6849b969427f83f1a9a578..67f1002a03a13a88d7224a3a8abea8f234aa9ae2 100644 (file)
@@ -208,6 +208,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 
        /* Rx IRQ will be enabled when NAPI processing finished */
 
+       atomic_inc(&wil->isr_count_rx);
        return IRQ_HANDLED;
 }
 
@@ -246,6 +247,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 
        /* Tx IRQ will be enabled when NAPI processing finished */
 
+       atomic_inc(&wil->isr_count_tx);
        return IRQ_HANDLED;
 }
 
@@ -257,6 +259,7 @@ static void wil_notify_fw_error(struct wil6210_priv *wil)
                [1] = "EVENT=FW_ERROR",
                [2] = NULL,
        };
+       wil_err(wil, "Notify about firmware error\n");
        kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 }
 
index 11e6d9d22eae47faa34a51c813238f2acd1a1eea..53a689ed7c7d0ba053d652c4b696639639b617d7 100644 (file)
@@ -61,11 +61,24 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
 static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
 {
        uint i;
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
        struct wil_sta_info *sta = &wil->sta[cid];
+       wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
+                    sta->status);
 
        sta->data_port_open = false;
        if (sta->status != wil_sta_unused) {
                wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+               switch (wdev->iftype) {
+               case NL80211_IFTYPE_AP:
+               case NL80211_IFTYPE_P2P_GO:
+                       /* AP-like interface */
+                       cfg80211_del_sta(ndev, sta->addr, GFP_KERNEL);
+                       break;
+               default:
+                       break;
+               }
                sta->status = wil_sta_unused;
        }
 
@@ -119,11 +132,6 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid)
                clear_bit(wil_status_fwconnecting, &wil->status);
                break;
        default:
-               /* AP-like interface and monitor:
-                * never scan, always connected
-                */
-               if (bssid)
-                       cfg80211_del_sta(ndev, bssid, GFP_KERNEL);
                break;
        }
 }
@@ -465,6 +473,7 @@ void wil_link_on(struct wil6210_priv *wil)
        wil_dbg_misc(wil, "%s()\n", __func__);
 
        netif_carrier_on(ndev);
+       wil_dbg_misc(wil, "netif_tx_wake : link on\n");
        netif_tx_wake_all_queues(ndev);
 }
 
@@ -475,6 +484,7 @@ void wil_link_off(struct wil6210_priv *wil)
        wil_dbg_misc(wil, "%s()\n", __func__);
 
        netif_tx_stop_all_queues(ndev);
+       wil_dbg_misc(wil, "netif_tx_stop : link off\n");
        netif_carrier_off(ndev);
 }
 
@@ -552,6 +562,8 @@ static int __wil_down(struct wil6210_priv *wil)
        napi_disable(&wil->napi_tx);
 
        if (wil->scan_request) {
+               wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+                            wil->scan_request);
                del_timer_sync(&wil->scan_timer);
                cfg80211_scan_done(wil->scan_request, true);
                wil->scan_request = NULL;
index 106b6dcb773a35fe3e57eba85423f4e2135ae24c..7afce6e8c5078fb92257a64f6c1e3fabaa65298c 100644 (file)
@@ -132,7 +132,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
        ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
        cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
 
-       ndev = alloc_netdev(0, "wlan%d", ether_setup);
+       ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
        if (!ndev) {
                dev_err(dev, "alloc_netdev_mqs failed\n");
                rc = -ENOMEM;
index 1e2e07b9d13d9d6b2957849b4792679c77a6dcf5..77b6272d93fb24c11e9d703549e03929f623c6ca 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/debugfs.h>
 #include <linux/pci.h>
 #include <linux/moduleparam.h>
 
@@ -27,11 +26,22 @@ MODULE_PARM_DESC(use_msi,
                 " Use MSI interrupt: "
                 "0 - don't, 1 - (default) - single, or 3");
 
+static bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
+
 /* Bus ops */
 static int wil_if_pcie_enable(struct wil6210_priv *wil)
 {
        struct pci_dev *pdev = wil->pdev;
        int rc;
+       /* on platforms with buggy ACPI, pdev->msi_enabled may be set to
+        * allow pci_enable_device to work. This indicates INTx was not routed
+        * and only MSI should be used
+        */
+       int msi_only = pdev->msi_enabled;
+
+       pdev->msi_enabled = 0;
 
        pci_set_master(pdev);
 
@@ -63,6 +73,12 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 
        wil->n_msi = use_msi;
 
+       if ((wil->n_msi == 0) && msi_only) {
+               wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
+               rc = -ENODEV;
+               goto stop_master;
+       }
+
        rc = wil6210_init_irq(wil, pdev->irq);
        if (rc)
                goto stop_master;
@@ -71,6 +87,8 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        mutex_lock(&wil->mutex);
        rc = wil_reset(wil);
        mutex_unlock(&wil->mutex);
+       if (debug_fw)
+               rc = 0;
        if (rc)
                goto release_irq;
 
@@ -119,9 +137,16 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        rc = pci_enable_device(pdev);
        if (rc) {
-               dev_err(&pdev->dev, "pci_enable_device failed\n");
-               return -ENODEV;
+               dev_err(&pdev->dev,
+                       "pci_enable_device failed, retry with MSI only\n");
+               /* Work around for platforms that can't allocate IRQ:
+                * retry with MSI only
+                */
+               pdev->msi_enabled = 1;
+               rc = pci_enable_device(pdev);
        }
+       if (rc)
+               return -ENODEV;
        /* rollback to err_disable_pdev */
 
        rc = pci_request_region(pdev, 0, WIL_NAME);
index 747ae127587763d0bbdd652f674749960cadb1f8..180ca4793904a37d7723198f5652b02e95142430 100644 (file)
@@ -116,6 +116,7 @@ void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
 
        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
+               r->ssn_last_drop = seq;
                dev_kfree_skb(skb);
                goto out;
        }
index 0784ef3d4ce2795b68091cef9353e47d7712239b..af4b93e4beb5e80e9412785d60a17ebbe95e790a 100644 (file)
@@ -525,6 +525,17 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                ndev->stats.rx_bytes += len;
                stats->rx_bytes += len;
        }
+       {
+               static const char * const gro_res_str[] = {
+                       [GRO_MERGED]            = "GRO_MERGED",
+                       [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
+                       [GRO_HELD]              = "GRO_HELD",
+                       [GRO_NORMAL]            = "GRO_NORMAL",
+                       [GRO_DROP]              = "GRO_DROP",
+               };
+               wil_dbg_txrx(wil, "Rx complete %d bytes => %s,\n",
+                            len, gro_res_str[rc]);
+       }
 }
 
 /**
@@ -760,7 +771,7 @@ static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
                goto found;
        }
 
-       wil_err(wil, "Tx while no vrings active?\n");
+       wil_dbg_txrx(wil, "Tx while no vrings active?\n");
 
        return NULL;
 
@@ -881,6 +892,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        int nr_frags = skb_shinfo(skb)->nr_frags;
        uint f = 0;
        int vring_index = vring - wil->vring_tx;
+       struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
        uint i = swhead;
        dma_addr_t pa;
 
@@ -953,6 +965,9 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
 
+       if (wil_vring_is_empty(vring)) /* performance monitoring */
+               txdata->idle += get_cycles() - txdata->last_idle;
+
        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
        wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
@@ -1016,15 +1031,17 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                vring = wil_tx_bcast(wil, skb);
        }
        if (!vring) {
-               wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
+               wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
                goto drop;
        }
        /* set up vring entry */
        rc = wil_tx_vring(wil, vring, skb);
 
        /* do we still have enough room in the vring? */
-       if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))
+       if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) {
                netif_tx_stop_all_queues(wil_to_ndev(wil));
+               wil_dbg_txrx(wil, "netif_tx_stop : ring full\n");
+       }
 
        switch (rc) {
        case 0:
@@ -1132,8 +1149,16 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                        done++;
                }
        }
-       if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring))
+
+       if (wil_vring_is_empty(vring)) { /* performance monitoring */
+               wil_dbg_txrx(wil, "Ring[%2d] empty\n", ringid);
+               txdata->last_idle = get_cycles();
+       }
+
+       if (wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring)) {
+               wil_dbg_txrx(wil, "netif_tx_wake : ring not full\n");
                netif_tx_wake_all_queues(wil_to_ndev(wil));
+       }
 
        return done;
 }
index e25edc52398fed91bce54cb0b3fb723a9d1ff443..424906635f05dc503b5718c8e8b0f3863e500ae0 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
 #include <net/cfg80211.h>
+#include <linux/timex.h>
 
 #define WIL_NAME "wil6210"
 
@@ -251,7 +252,7 @@ struct vring {
  */
 struct vring_tx_data {
        int enabled;
-
+       cycles_t idle, last_idle, begin;
 };
 
 enum { /* for wil6210_priv.status */
@@ -303,6 +304,7 @@ struct wil_tid_ampdu_rx {
        u16 ssn;
        u16 buf_size;
        u16 timeout;
+       u16 ssn_last_drop;
        u8 dialog_token;
        bool first_time; /* is it 1-st time this buffer used? */
 };
@@ -410,6 +412,7 @@ struct wil6210_priv {
        struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
        /* statistics */
        struct wil6210_stats stats;
+       atomic_t isr_count_rx, isr_count_tx;
        /* debugfs */
        struct dentry *debug;
        struct debugfs_blob_wrapper fw_code_blob;
@@ -504,9 +507,14 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil6210_disable_irq(struct wil6210_priv *wil);
 void wil6210_enable_irq(struct wil6210_priv *wil);
+int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+                        struct cfg80211_mgmt_tx_params *params,
+                        u64 *cookie);
 
 int wil6210_debugfs_init(struct wil6210_priv *wil);
 void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+                      struct station_info *sinfo);
 
 struct wireless_dev *wil_cfg80211_init(struct device *dev);
 void wil_wdev_free(struct wil6210_priv *wil);
index 6cc0e182cc703c1f0c8ad243a008f8f6129590a6..a136dab560e22c94e7c30682d56cbfbb9144745c 100644 (file)
@@ -75,6 +75,7 @@ static const struct {
        {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
        {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
        {0x880000, 0x88a000, 0x880000}, /* various RGF */
+       {0x88b000, 0x88c000, 0x88b000}, /* Pcie_ext_rgf */
        {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
        /*
         * 920000..930000 ucode code RAM
@@ -327,6 +328,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
 
        if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
                struct cfg80211_bss *bss;
+               u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+               u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+               u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+               const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+               size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+                                                u.beacon.variable);
+               wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+               wil_dbg_wmi(wil, "TSF : 0x%016llx\n", tsf);
+               wil_dbg_wmi(wil, "Beacon interval : %d\n", bi);
+               wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
+                                ie_len, true);
 
                bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
                                                d_len, signal, GFP_KERNEL);
@@ -351,6 +363,9 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                bool aborted = (data->status != WMI_SCAN_SUCCESS);
 
                wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+               wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n",
+                            wil->scan_request, aborted);
+
                del_timer_sync(&wil->scan_timer);
                cfg80211_scan_done(wil->scan_request, aborted);
                wil->scan_request = NULL;
@@ -668,14 +683,12 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 
        for (n = 0;; n++) {
                u16 len;
+               bool q;
 
                r->head = ioread32(wil->csr + HOST_MBOX +
                                   offsetof(struct wil6210_mbox_ctl, rx.head));
-               if (r->tail == r->head) {
-                       if (n == 0)
-                               wil_dbg_wmi(wil, "No events?\n");
-                       return;
-               }
+               if (r->tail == r->head)
+                       break;
 
                wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n",
                            r->head, r->tail);
@@ -684,14 +697,14 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                                     sizeof(struct wil6210_mbox_ring_desc));
                if (d_tail.sync == 0) {
                        wil_err(wil, "Mbox evt not owned by FW?\n");
-                       return;
+                       break;
                }
 
                /* read cmd header from descriptor */
                if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
                        wil_err(wil, "Mbox evt at 0x%08x?\n",
                                le32_to_cpu(d_tail.addr));
-                       return;
+                       break;
                }
                len = le16_to_cpu(hdr.len);
                wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
@@ -705,7 +718,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                                             event.wmi) + len, 4),
                              GFP_KERNEL);
                if (!evt)
-                       return;
+                       break;
 
                evt->event.hdr = hdr;
                cmd = (void *)&evt->event.wmi;
@@ -737,14 +750,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                spin_lock_irqsave(&wil->wmi_ev_lock, flags);
                list_add_tail(&evt->list, &wil->pending_wmi_ev);
                spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
-               {
-                       int q = queue_work(wil->wmi_wq,
-                                          &wil->wmi_event_worker);
-                       wil_dbg_wmi(wil, "queue_work -> %d\n", q);
-               }
+               q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
+               wil_dbg_wmi(wil, "queue_work -> %d\n", q);
        }
-       if (n > 1)
-               wil_dbg_wmi(wil, "%s -> %d events processed\n", __func__, n);
+       /* normally, 1 event per IRQ should be processed */
+       wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n);
 }
 
 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
index 40fd9b7b14269eb52d2440b09b1d6c6bac17f946..d4c6ae3a92107c86c2417fd064fe09af04310f0f 100644 (file)
@@ -122,6 +122,15 @@ config B43_PIO
        select SSB_BLOCKIO
        default y
 
+config B43_PHY_G
+       bool "Support for G-PHY (802.11g) devices"
+       depends on B43 && B43_SSB
+       default y
+       ---help---
+         This PHY type can be found in the following chipsets:
+         PCI: BCM4306, BCM4311, BCM4318
+         SoC: BCM4712, BCM5352E
+
 config B43_PHY_N
        bool "Support for 802.11n (N-PHY) devices"
        depends on B43
index 098fe9ee7096958a73093c2cbe18d7b37216e556..6e00b8804ada1f2a1274a5af33aef5540aa85ecc 100644 (file)
@@ -1,13 +1,11 @@
 b43-y                          += main.o
 b43-y                          += bus.o
-b43-y                          += tables.o
+b43-$(CONFIG_B43_PHY_G)                += phy_a.o phy_g.o tables.o lo.o wa.o
 b43-$(CONFIG_B43_PHY_N)                += tables_nphy.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2055.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2056.o
 b43-$(CONFIG_B43_PHY_N)                += radio_2057.o
 b43-y                          += phy_common.o
-b43-y                          += phy_g.o
-b43-y                          += phy_a.o
 b43-$(CONFIG_B43_PHY_N)                += phy_n.o
 b43-$(CONFIG_B43_PHY_LP)       += phy_lp.o
 b43-$(CONFIG_B43_PHY_LP)       += tables_lpphy.o
@@ -17,8 +15,6 @@ b43-$(CONFIG_B43_PHY_HT)      += radio_2059.o
 b43-$(CONFIG_B43_PHY_LCN)      += phy_lcn.o tables_phy_lcn.o
 b43-y                          += sysfs.o
 b43-y                          += xmit.o
-b43-y                          += lo.o
-b43-y                          += wa.o
 b43-y                          += dma.o
 b43-y                          += pio.o
 b43-y                          += rfkill.o
index 0d6a0bb1f876c3089c13448921b34339da3b8b67..15aaeb132a327eededc5cd9cee7f782a231d9048 100644 (file)
@@ -122,7 +122,11 @@ static const struct bcma_device_id b43_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1E, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x28, BCMA_ANY_CLASS),
+       BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x2A, BCMA_ANY_CLASS),
        BCMA_CORETABLE_END
 };
 MODULE_DEVICE_TABLE(bcma, b43_bcma_tbl);
@@ -2201,52 +2205,82 @@ err_format:
        return -EPROTO;
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/Init/Firmware */
 static int b43_try_request_fw(struct b43_request_fw_context *ctx)
 {
        struct b43_wldev *dev = ctx->dev;
        struct b43_firmware *fw = &ctx->dev->fw;
+       struct b43_phy *phy = &dev->phy;
        const u8 rev = ctx->dev->dev->core_rev;
        const char *filename;
-       u32 tmshigh;
        int err;
 
-       /* Files for HT and LCN were found by trying one by one */
-
        /* Get microcode */
-       if ((rev >= 5) && (rev <= 10)) {
-               filename = "ucode5";
-       } else if ((rev >= 11) && (rev <= 12)) {
-               filename = "ucode11";
-       } else if (rev == 13) {
-               filename = "ucode13";
-       } else if (rev == 14) {
-               filename = "ucode14";
-       } else if (rev == 15) {
+       filename = NULL;
+       switch (rev) {
+       case 42:
+               if (phy->type == B43_PHYTYPE_AC)
+                       filename = "ucode42";
+               break;
+       case 40:
+               if (phy->type == B43_PHYTYPE_AC)
+                       filename = "ucode40";
+               break;
+       case 33:
+               if (phy->type == B43_PHYTYPE_LCN40)
+                       filename = "ucode33_lcn40";
+               break;
+       case 30:
+               if (phy->type == B43_PHYTYPE_N)
+                       filename = "ucode30_mimo";
+               break;
+       case 29:
+               if (phy->type == B43_PHYTYPE_HT)
+                       filename = "ucode29_mimo";
+               break;
+       case 26:
+               if (phy->type == B43_PHYTYPE_HT)
+                       filename = "ucode26_mimo";
+               break;
+       case 28:
+       case 25:
+               if (phy->type == B43_PHYTYPE_N)
+                       filename = "ucode25_mimo";
+               else if (phy->type == B43_PHYTYPE_LCN)
+                       filename = "ucode25_lcn";
+               break;
+       case 24:
+               if (phy->type == B43_PHYTYPE_LCN)
+                       filename = "ucode24_lcn";
+               break;
+       case 23:
+               if (phy->type == B43_PHYTYPE_N)
+                       filename = "ucode16_mimo";
+               break;
+       case 16 ... 19:
+               if (phy->type == B43_PHYTYPE_N)
+                       filename = "ucode16_mimo";
+               else if (phy->type == B43_PHYTYPE_LP)
+                       filename = "ucode16_lp";
+               break;
+       case 15:
                filename = "ucode15";
-       } else {
-               switch (dev->phy.type) {
-               case B43_PHYTYPE_N:
-                       if (rev >= 16)
-                               filename = "ucode16_mimo";
-                       else
-                               goto err_no_ucode;
-                       break;
-               case B43_PHYTYPE_HT:
-                       if (rev == 29)
-                               filename = "ucode29_mimo";
-                       else
-                               goto err_no_ucode;
-                       break;
-               case B43_PHYTYPE_LCN:
-                       if (rev == 24)
-                               filename = "ucode24_mimo";
-                       else
-                               goto err_no_ucode;
-                       break;
-               default:
-                       goto err_no_ucode;
-               }
+               break;
+       case 14:
+               filename = "ucode14";
+               break;
+       case 13:
+               filename = "ucode13";
+               break;
+       case 11 ... 12:
+               filename = "ucode11";
+               break;
+       case 5 ... 10:
+               filename = "ucode5";
+               break;
        }
+       if (!filename)
+               goto err_no_ucode;
        err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
        if (err)
                goto err_load;
@@ -2268,117 +2302,121 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
                goto err_load;
 
        /* Get initvals */
+       filename = NULL;
        switch (dev->phy.type) {
-       case B43_PHYTYPE_A:
-               if ((rev >= 5) && (rev <= 10)) {
-                       tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
-                       if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
-                               filename = "a0g1initvals5";
-                       else
-                               filename = "a0g0initvals5";
-               } else
-                       goto err_no_initvals;
-               break;
        case B43_PHYTYPE_G:
-               if ((rev >= 5) && (rev <= 10))
-                       filename = "b0g0initvals5";
-               else if (rev >= 13)
+               if (rev == 13)
                        filename = "b0g0initvals13";
-               else
-                       goto err_no_initvals;
+               else if (rev >= 5 && rev <= 10)
+                       filename = "b0g0initvals5";
                break;
        case B43_PHYTYPE_N:
-               if (rev >= 16)
+               if (rev == 30)
+                       filename = "n16initvals30";
+               else if (rev == 28 || rev == 25)
+                       filename = "n0initvals25";
+               else if (rev == 24)
+                       filename = "n0initvals24";
+               else if (rev == 23)
+                       filename = "n0initvals16"; /* What about n0initvals22? */
+               else if (rev >= 16 && rev <= 18)
                        filename = "n0initvals16";
-               else if ((rev >= 11) && (rev <= 12))
+               else if (rev >= 11 && rev <= 12)
                        filename = "n0initvals11";
-               else
-                       goto err_no_initvals;
                break;
        case B43_PHYTYPE_LP:
-               if (rev == 13)
-                       filename = "lp0initvals13";
+               if (rev >= 16 && rev <= 18)
+                       filename = "lp0initvals16";
+               else if (rev == 15)
+                       filename = "lp0initvals15";
                else if (rev == 14)
                        filename = "lp0initvals14";
-               else if (rev >= 15)
-                       filename = "lp0initvals15";
-               else
-                       goto err_no_initvals;
+               else if (rev == 13)
+                       filename = "lp0initvals13";
                break;
        case B43_PHYTYPE_HT:
                if (rev == 29)
                        filename = "ht0initvals29";
-               else
-                       goto err_no_initvals;
+               else if (rev == 26)
+                       filename = "ht0initvals26";
                break;
        case B43_PHYTYPE_LCN:
                if (rev == 24)
                        filename = "lcn0initvals24";
-               else
-                       goto err_no_initvals;
                break;
-       default:
-               goto err_no_initvals;
+       case B43_PHYTYPE_LCN40:
+               if (rev == 33)
+                       filename = "lcn400initvals33";
+               break;
+       case B43_PHYTYPE_AC:
+               if (rev == 42)
+                       filename = "ac1initvals42";
+               else if (rev == 40)
+                       filename = "ac0initvals40";
+               break;
        }
+       if (!filename)
+               goto err_no_initvals;
        err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
        if (err)
                goto err_load;
 
        /* Get bandswitch initvals */
+       filename = NULL;
        switch (dev->phy.type) {
-       case B43_PHYTYPE_A:
-               if ((rev >= 5) && (rev <= 10)) {
-                       tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
-                       if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY)
-                               filename = "a0g1bsinitvals5";
-                       else
-                               filename = "a0g0bsinitvals5";
-               } else if (rev >= 11)
-                       filename = NULL;
-               else
-                       goto err_no_initvals;
-               break;
        case B43_PHYTYPE_G:
-               if ((rev >= 5) && (rev <= 10))
+               if (rev == 13)
+                       filename = "b0g0bsinitvals13";
+               else if (rev >= 5 && rev <= 10)
                        filename = "b0g0bsinitvals5";
-               else if (rev >= 11)
-                       filename = NULL;
-               else
-                       goto err_no_initvals;
                break;
        case B43_PHYTYPE_N:
-               if (rev >= 16)
+               if (rev == 30)
+                       filename = "n16bsinitvals30";
+               else if (rev == 28 || rev == 25)
+                       filename = "n0bsinitvals25";
+               else if (rev == 24)
+                       filename = "n0bsinitvals24";
+               else if (rev == 23)
+                       filename = "n0bsinitvals16"; /* What about n0bsinitvals22? */
+               else if (rev >= 16 && rev <= 18)
                        filename = "n0bsinitvals16";
-               else if ((rev >= 11) && (rev <= 12))
+               else if (rev >= 11 && rev <= 12)
                        filename = "n0bsinitvals11";
-               else
-                       goto err_no_initvals;
                break;
        case B43_PHYTYPE_LP:
-               if (rev == 13)
-                       filename = "lp0bsinitvals13";
+               if (rev >= 16 && rev <= 18)
+                       filename = "lp0bsinitvals16";
+               else if (rev == 15)
+                       filename = "lp0bsinitvals15";
                else if (rev == 14)
                        filename = "lp0bsinitvals14";
-               else if (rev >= 15)
-                       filename = "lp0bsinitvals15";
-               else
-                       goto err_no_initvals;
+               else if (rev == 13)
+                       filename = "lp0bsinitvals13";
                break;
        case B43_PHYTYPE_HT:
                if (rev == 29)
                        filename = "ht0bsinitvals29";
-               else
-                       goto err_no_initvals;
+               else if (rev == 26)
+                       filename = "ht0bsinitvals26";
                break;
        case B43_PHYTYPE_LCN:
                if (rev == 24)
                        filename = "lcn0bsinitvals24";
-               else
-                       goto err_no_initvals;
                break;
-       default:
-               goto err_no_initvals;
+       case B43_PHYTYPE_LCN40:
+               if (rev == 33)
+                       filename = "lcn400bsinitvals33";
+               break;
+       case B43_PHYTYPE_AC:
+               if (rev == 42)
+                       filename = "ac1bsinitvals42";
+               else if (rev == 40)
+                       filename = "ac0bsinitvals40";
+               break;
        }
+       if (!filename)
+               goto err_no_initvals;
        err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
        if (err)
                goto err_load;
@@ -3798,38 +3836,29 @@ static void b43_set_retry_limits(struct b43_wldev *dev,
 static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
-       struct b43_wldev *dev;
-       struct b43_phy *phy;
+       struct b43_wldev *dev = wl->current_dev;
+       struct b43_phy *phy = &dev->phy;
        struct ieee80211_conf *conf = &hw->conf;
        int antenna;
        int err = 0;
-       bool reload_bss = false;
 
        mutex_lock(&wl->mutex);
-
-       dev = wl->current_dev;
-
        b43_mac_suspend(dev);
 
-       /* Switch the band (if necessary). This might change the active core. */
-       err = b43_switch_band(dev, conf->chandef.chan);
-       if (err)
-               goto out_unlock_mutex;
-
-       /* Need to reload all settings if the core changed */
-       if (dev != wl->current_dev) {
-               dev = wl->current_dev;
-               changed = ~0;
-               reload_bss = true;
-       }
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               phy->chandef = &conf->chandef;
+               phy->channel = conf->chandef.chan->hw_value;
 
-       phy = &dev->phy;
+               /* Switch the band (if necessary). */
+               err = b43_switch_band(dev, conf->chandef.chan);
+               if (err)
+                       goto out_mac_enable;
 
-       if (conf_is_ht(conf))
-               phy->is_40mhz =
-                       (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
-       else
-               phy->is_40mhz = false;
+               /* Switch to the requested channel.
+                * The firmware takes care of races with the TX handler.
+                */
+               b43_switch_channel(dev, phy->channel);
+       }
 
        if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
                b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
@@ -3838,11 +3867,6 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
        if (!changed)
                goto out_mac_enable;
 
-       /* Switch to the requested channel.
-        * The firmware takes care of races with the TX handler. */
-       if (conf->chandef.chan->hw_value != phy->channel)
-               b43_switch_channel(dev, conf->chandef.chan->hw_value);
-
        dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
 
        /* Adjust the desired TX power level. */
@@ -3878,12 +3902,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
 
 out_mac_enable:
        b43_mac_enable(dev);
-out_unlock_mutex:
        mutex_unlock(&wl->mutex);
 
-       if (wl->vif && reload_bss)
-               b43_op_bss_info_changed(hw, wl->vif, &wl->vif->bss_conf, ~0);
-
        return err;
 }
 
@@ -4309,6 +4329,7 @@ static char *b43_phy_name(struct b43_wldev *dev, u8 phy_type)
 static int b43_phy_versioning(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
+       const u8 core_rev = dev->dev->core_rev;
        u32 tmp;
        u8 analog_type;
        u8 phy_type;
@@ -4323,20 +4344,20 @@ static int b43_phy_versioning(struct b43_wldev *dev)
        analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT;
        phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT;
        phy_rev = (tmp & B43_PHYVER_VERSION);
+
+       /* LCNXN is continuation of N which run out of revisions */
+       if (phy_type == B43_PHYTYPE_LCNXN) {
+               phy_type = B43_PHYTYPE_N;
+               phy_rev += 16;
+       }
+
        switch (phy_type) {
-       case B43_PHYTYPE_A:
-               if (phy_rev >= 4)
-                       unsupported = 1;
-               break;
-       case B43_PHYTYPE_B:
-               if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6
-                   && phy_rev != 7)
-                       unsupported = 1;
-               break;
+#ifdef CONFIG_B43_PHY_G
        case B43_PHYTYPE_G:
                if (phy_rev > 9)
                        unsupported = 1;
                break;
+#endif
 #ifdef CONFIG_B43_PHY_N
        case B43_PHYTYPE_N:
                if (phy_rev > 9)
@@ -4374,7 +4395,15 @@ static int b43_phy_versioning(struct b43_wldev *dev)
                analog_type, phy_type, b43_phy_name(dev, phy_type), phy_rev);
 
        /* Get RADIO versioning */
-       if (dev->dev->core_rev >= 24) {
+       if (core_rev == 40 || core_rev == 42) {
+               radio_manuf = 0x17F;
+
+               b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 0);
+               radio_rev = b43_read16(dev, B43_MMIO_RADIO24_DATA);
+
+               b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 1);
+               radio_ver = b43_read16(dev, B43_MMIO_RADIO24_DATA);
+       } else if (core_rev >= 24) {
                u16 radio24[3];
 
                for (tmp = 0; tmp < 3; tmp++) {
index a6c38104693d4f60276cc087aedb67aab1fc0c99..25e40432d68b9c0c9dcc25ff44f2386b56e6ca68 100644 (file)
@@ -573,7 +573,7 @@ static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
 {//TODO
 }
 
-const struct b43_phy_operations b43_phyops_a = {
+static const struct b43_phy_operations b43_phyops_a = {
        .allocate               = b43_aphy_op_allocate,
        .free                   = b43_aphy_op_free,
        .prepare_structs        = b43_aphy_op_prepare_structs,
index 5cfaab7b16ee8c1de47bec7838aa1ab0a679f485..f7d0d929a37479ae0915655691eeb3846928d80b 100644 (file)
@@ -123,8 +123,4 @@ struct b43_phy_a {
  */
 void b43_phy_inita(struct b43_wldev *dev);
 
-
-struct b43_phy_operations;
-extern const struct b43_phy_operations b43_phyops_a;
-
 #endif /* LINUX_B43_PHY_A_H_ */
index 08244b3b327e5f98f06b5d5db8943d8e624bb3a8..3cbef21b4726dc106c5746f79689a10d0db6c4e0 100644 (file)
@@ -45,11 +45,10 @@ int b43_phy_allocate(struct b43_wldev *dev)
        phy->ops = NULL;
 
        switch (phy->type) {
-       case B43_PHYTYPE_A:
-               phy->ops = &b43_phyops_a;
-               break;
        case B43_PHYTYPE_G:
+#ifdef CONFIG_B43_PHY_G
                phy->ops = &b43_phyops_g;
+#endif
                break;
        case B43_PHYTYPE_N:
 #ifdef CONFIG_B43_PHY_N
@@ -94,7 +93,13 @@ int b43_phy_init(struct b43_wldev *dev)
        const struct b43_phy_operations *ops = phy->ops;
        int err;
 
-       phy->channel = ops->get_default_chan(dev);
+       /* During PHY init we need to use some channel. On the first init this
+        * function is called *before* b43_op_config, so our pointer is NULL.
+        */
+       if (!phy->chandef) {
+               phy->chandef = &dev->wl->hw->conf.chandef;
+               phy->channel = phy->chandef->chan->hw_value;
+       }
 
        phy->ops->switch_analog(dev, true);
        b43_software_rfkill(dev, false);
@@ -106,9 +111,7 @@ int b43_phy_init(struct b43_wldev *dev)
        }
        phy->do_full_init = false;
 
-       /* Make sure to switch hardware and firmware (SHM) to
-        * the default channel. */
-       err = b43_switch_channel(dev, ops->get_default_chan(dev));
+       err = b43_switch_channel(dev, phy->channel);
        if (err) {
                b43err(dev->wl, "PHY init: Channel switch to default failed\n");
                goto err_phy_exit;
@@ -408,9 +411,6 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
        u16 channelcookie, savedcookie;
        int err;
 
-       if (new_channel == B43_DEFAULT_CHANNEL)
-               new_channel = phy->ops->get_default_chan(dev);
-
        /* First we set the channel radio code to prevent the
         * firmware from sending ghost packets.
         */
@@ -428,7 +428,6 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
        if (err)
                goto err_restore_cookie;
 
-       dev->phy.channel = new_channel;
        /* Wait for the radio to tune to the channel and stabilize. */
        msleep(8);
 
@@ -547,10 +546,9 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
 }
 
 
-bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type)
+bool b43_is_40mhz(struct b43_wldev *dev)
 {
-       return (channel_type == NL80211_CHAN_HT40MINUS ||
-               channel_type == NL80211_CHAN_HT40PLUS);
+       return dev->phy.chandef->width == NL80211_CHAN_WIDTH_40;
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
index 4ad6240d9ff40e53557b4d29b27788b3eb9602d5..3912274f71e30b7040ade6fc9c7d455ab1192d6a 100644 (file)
@@ -228,9 +228,6 @@ struct b43_phy {
        bool supports_2ghz;
        bool supports_5ghz;
 
-       /* HT info */
-       bool is_40mhz;
-
        /* Is GMODE (2 GHz mode) bit enabled? */
        bool gmode;
 
@@ -267,9 +264,8 @@ struct b43_phy {
        unsigned long next_txpwr_check_time;
 
        /* Current channel */
+       struct cfg80211_chan_def *chandef;
        unsigned int channel;
-       u16 channel_freq;
-       enum nl80211_channel_type channel_type;
 
        /* PHY TX errors counter. */
        atomic_t txerr_cnt;
@@ -400,10 +396,6 @@ void b43_phy_take_out_of_reset(struct b43_wldev *dev);
  * b43_switch_channel - Switch to another channel
  */
 int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
-/**
- * B43_DEFAULT_CHANNEL - Switch to the default channel.
- */
-#define B43_DEFAULT_CHANNEL    UINT_MAX
 
 /**
  * b43_software_rfkill - Turn the radio ON or OFF in software.
@@ -454,7 +446,7 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
  */
 void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
 
-bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type);
+bool b43_is_40mhz(struct b43_wldev *dev);
 
 void b43_phy_force_clock(struct b43_wldev *dev, bool force);
 
index 5d6833f184982f67eeb79977dbb49d221ddbd216..f2974c6b1c01fa3ddd11753a74badac91bc9b43e 100644 (file)
@@ -596,7 +596,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
        u8 target[3];
        s16 a1[3], b0[3], b1[3];
 
-       u16 freq = dev->phy.channel_freq;
+       u16 freq = dev->phy.chandef->chan->center_freq;
        int i, c;
 
        if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
index 86569f6a870507c1723d95ccc6d39a2c13f449a2..50ca6f87d5e80be2ed716ae931384650a4729e0e 100644 (file)
@@ -590,7 +590,103 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
  * Radio 0x2057
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rcal */
+static void b43_radio_2057_chantab_upload(struct b43_wldev *dev,
+                                         const struct b43_nphy_chantabent_rev7 *e_r7,
+                                         const struct b43_nphy_chantabent_rev7_2g *e_r7_2g)
+{
+       if (e_r7_2g) {
+               b43_radio_write(dev, R2057_VCOCAL_COUNTVAL0, e_r7_2g->radio_vcocal_countval0);
+               b43_radio_write(dev, R2057_VCOCAL_COUNTVAL1, e_r7_2g->radio_vcocal_countval1);
+               b43_radio_write(dev, R2057_RFPLL_REFMASTER_SPAREXTALSIZE, e_r7_2g->radio_rfpll_refmaster_sparextalsize);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, e_r7_2g->radio_rfpll_loopfilter_r1);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, e_r7_2g->radio_rfpll_loopfilter_c2);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, e_r7_2g->radio_rfpll_loopfilter_c1);
+               b43_radio_write(dev, R2057_CP_KPD_IDAC, e_r7_2g->radio_cp_kpd_idac);
+               b43_radio_write(dev, R2057_RFPLL_MMD0, e_r7_2g->radio_rfpll_mmd0);
+               b43_radio_write(dev, R2057_RFPLL_MMD1, e_r7_2g->radio_rfpll_mmd1);
+               b43_radio_write(dev, R2057_VCOBUF_TUNE, e_r7_2g->radio_vcobuf_tune);
+               b43_radio_write(dev, R2057_LOGEN_MX2G_TUNE, e_r7_2g->radio_logen_mx2g_tune);
+               b43_radio_write(dev, R2057_LOGEN_INDBUF2G_TUNE, e_r7_2g->radio_logen_indbuf2g_tune);
+               b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, e_r7_2g->radio_txmix2g_tune_boost_pu_core0);
+               b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE0, e_r7_2g->radio_pad2g_tune_pus_core0);
+               b43_radio_write(dev, R2057_LNA2G_TUNE_CORE0, e_r7_2g->radio_lna2g_tune_core0);
+               b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1, e_r7_2g->radio_txmix2g_tune_boost_pu_core1);
+               b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE1, e_r7_2g->radio_pad2g_tune_pus_core1);
+               b43_radio_write(dev, R2057_LNA2G_TUNE_CORE1, e_r7_2g->radio_lna2g_tune_core1);
+
+       } else {
+               b43_radio_write(dev, R2057_VCOCAL_COUNTVAL0, e_r7->radio_vcocal_countval0);
+               b43_radio_write(dev, R2057_VCOCAL_COUNTVAL1, e_r7->radio_vcocal_countval1);
+               b43_radio_write(dev, R2057_RFPLL_REFMASTER_SPAREXTALSIZE, e_r7->radio_rfpll_refmaster_sparextalsize);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, e_r7->radio_rfpll_loopfilter_r1);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, e_r7->radio_rfpll_loopfilter_c2);
+               b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, e_r7->radio_rfpll_loopfilter_c1);
+               b43_radio_write(dev, R2057_CP_KPD_IDAC, e_r7->radio_cp_kpd_idac);
+               b43_radio_write(dev, R2057_RFPLL_MMD0, e_r7->radio_rfpll_mmd0);
+               b43_radio_write(dev, R2057_RFPLL_MMD1, e_r7->radio_rfpll_mmd1);
+               b43_radio_write(dev, R2057_VCOBUF_TUNE, e_r7->radio_vcobuf_tune);
+               b43_radio_write(dev, R2057_LOGEN_MX2G_TUNE, e_r7->radio_logen_mx2g_tune);
+               b43_radio_write(dev, R2057_LOGEN_MX5G_TUNE, e_r7->radio_logen_mx5g_tune);
+               b43_radio_write(dev, R2057_LOGEN_INDBUF2G_TUNE, e_r7->radio_logen_indbuf2g_tune);
+               b43_radio_write(dev, R2057_LOGEN_INDBUF5G_TUNE, e_r7->radio_logen_indbuf5g_tune);
+               b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE0, e_r7->radio_txmix2g_tune_boost_pu_core0);
+               b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE0, e_r7->radio_pad2g_tune_pus_core0);
+               b43_radio_write(dev, R2057_PGA_BOOST_TUNE_CORE0, e_r7->radio_pga_boost_tune_core0);
+               b43_radio_write(dev, R2057_TXMIX5G_BOOST_TUNE_CORE0, e_r7->radio_txmix5g_boost_tune_core0);
+               b43_radio_write(dev, R2057_PAD5G_TUNE_MISC_PUS_CORE0, e_r7->radio_pad5g_tune_misc_pus_core0);
+               b43_radio_write(dev, R2057_LNA2G_TUNE_CORE0, e_r7->radio_lna2g_tune_core0);
+               b43_radio_write(dev, R2057_LNA5G_TUNE_CORE0, e_r7->radio_lna5g_tune_core0);
+               b43_radio_write(dev, R2057_TXMIX2G_TUNE_BOOST_PU_CORE1, e_r7->radio_txmix2g_tune_boost_pu_core1);
+               b43_radio_write(dev, R2057_PAD2G_TUNE_PUS_CORE1, e_r7->radio_pad2g_tune_pus_core1);
+               b43_radio_write(dev, R2057_PGA_BOOST_TUNE_CORE1, e_r7->radio_pga_boost_tune_core1);
+               b43_radio_write(dev, R2057_TXMIX5G_BOOST_TUNE_CORE1, e_r7->radio_txmix5g_boost_tune_core1);
+               b43_radio_write(dev, R2057_PAD5G_TUNE_MISC_PUS_CORE1, e_r7->radio_pad5g_tune_misc_pus_core1);
+               b43_radio_write(dev, R2057_LNA2G_TUNE_CORE1, e_r7->radio_lna2g_tune_core1);
+               b43_radio_write(dev, R2057_LNA5G_TUNE_CORE1, e_r7->radio_lna5g_tune_core1);
+       }
+}
+
+static void b43_radio_2057_setup(struct b43_wldev *dev,
+                                const struct b43_nphy_chantabent_rev7 *tabent_r7,
+                                const struct b43_nphy_chantabent_rev7_2g *tabent_r7_2g)
+{
+       struct b43_phy *phy = &dev->phy;
+
+       b43_radio_2057_chantab_upload(dev, tabent_r7, tabent_r7_2g);
+
+       switch (phy->radio_rev) {
+       case 0 ... 4:
+       case 6:
+               if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
+                       b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, 0x8);
+               } else {
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x1f);
+                       b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
+                       b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C2, 0x8);
+               }
+               break;
+       /* TODO */
+       }
+
+       /* TODO */
+
+       usleep_range(50, 100);
+
+       /* VCO calibration */
+       b43_radio_mask(dev, R2057_RFPLL_MISC_EN, ~0x01);
+       b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x04);
+       b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x4);
+       b43_radio_set(dev, R2057_RFPLL_MISC_EN, 0x01);
+       usleep_range(300, 600);
+}
+
+/* Calibrate resistors in LPF of PLL?
+ * http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
+ */
 static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -603,15 +699,25 @@ static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
                b43_radio_maskset(dev, 0x1ca, ~0x2, 0x1);
        }
 
+       /* Enable */
        b43_radio_set(dev, R2057_RCAL_CONFIG, 0x1);
        udelay(10);
-       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x3);
-       if (!b43_radio_wait_value(dev, R2057_RCCAL_N1_1, 1, 1, 100, 1000000)) {
+
+       /* Start */
+       b43_radio_set(dev, R2057_RCAL_CONFIG, 0x2);
+       usleep_range(100, 200);
+
+       /* Stop */
+       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
+
+       /* Wait and check for result */
+       if (!b43_radio_wait_value(dev, R2057_RCAL_STATUS, 1, 1, 100, 1000000)) {
                b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
                return 0;
        }
-       b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x2);
        tmp = b43_radio_read(dev, R2057_RCAL_STATUS) & 0x3E;
+
+       /* Disable */
        b43_radio_mask(dev, R2057_RCAL_CONFIG, ~0x1);
 
        if (phy->radio_rev == 5) {
@@ -627,7 +733,9 @@ static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
        return tmp & 0x3e;
 }
 
-/* http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */
+/* Calibrate the internal RC oscillator?
+ * http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
+ */
 static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -635,49 +743,76 @@ static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
                        phy->radio_rev == 6);
        u16 tmp;
 
+       /* Setup cal */
        if (special) {
                b43_radio_write(dev, R2057_RCCAL_MASTER, 0x61);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0xC0);
        } else {
-               b43_radio_write(dev, 0x1AE, 0x61);
+               b43_radio_write(dev, R2057v7_RCCAL_MASTER, 0x61);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0xE1);
        }
        b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+
+       /* Start, wait, stop */
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
-       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
                                  5000000))
                b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       usleep_range(35, 70);
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       usleep_range(70, 140);
+
+       /* Setup cal */
        if (special) {
                b43_radio_write(dev, R2057_RCCAL_MASTER, 0x69);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
        } else {
-               b43_radio_write(dev, 0x1AE, 0x69);
+               b43_radio_write(dev, R2057v7_RCCAL_MASTER, 0x69);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0xD5);
        }
        b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
+
+       /* Start, wait, stop */
+       usleep_range(35, 70);
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
-       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+       usleep_range(70, 140);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
                                  5000000))
                b43dbg(dev->wl, "Radio 0x2057 rccal timeout\n");
+       usleep_range(35, 70);
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       usleep_range(70, 140);
+
+       /* Setup cal */
        if (special) {
                b43_radio_write(dev, R2057_RCCAL_MASTER, 0x73);
                b43_radio_write(dev, R2057_RCCAL_X1, 0x28);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0xB0);
        } else {
-               b43_radio_write(dev, 0x1AE, 0x73);
+               b43_radio_write(dev, R2057v7_RCCAL_MASTER, 0x73);
                b43_radio_write(dev, R2057_RCCAL_X1, 0x6E);
                b43_radio_write(dev, R2057_RCCAL_TRC0, 0x99);
        }
+
+       /* Start, wait, stop */
+       usleep_range(35, 70);
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x55);
-       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 1, 1, 500,
+       usleep_range(70, 140);
+       if (!b43_radio_wait_value(dev, R2057_RCCAL_DONE_OSCCAP, 2, 2, 500,
                                  5000000)) {
                b43err(dev->wl, "Radio 0x2057 rcal timeout\n");
                return 0;
        }
        tmp = b43_radio_read(dev, R2057_RCCAL_DONE_OSCCAP);
+       usleep_range(35, 70);
        b43_radio_write(dev, R2057_RCCAL_START_R1_Q1_P1, 0x15);
+       usleep_range(70, 140);
+
+       if (special)
+               b43_radio_mask(dev, R2057_RCCAL_MASTER, ~0x1);
+       else
+               b43_radio_mask(dev, R2057v7_RCCAL_MASTER, ~0x1);
+
        return tmp;
 }
 
@@ -798,6 +933,7 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
 static void b43_radio_2056_setup(struct b43_wldev *dev,
                                const struct b43_nphy_channeltab_entry_rev3 *e)
 {
+       struct b43_phy *phy = &dev->phy;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
        enum ieee80211_band band = b43_current_band(dev->wl);
        u16 offset;
@@ -895,7 +1031,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                                        offset | B2056_TX_MIXG_BOOST_TUNE,
                                        mixg_boost);
                        } else {
-                               bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+                               bias = b43_is_40mhz(dev) ? 0x40 : 0x20;
                                b43_radio_write(dev,
                                        offset | B2056_TX_INTPAG_IMAIN_STAT,
                                        bias);
@@ -909,7 +1045,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
                        b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
                }
        } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
-               u16 freq = dev->phy.channel_freq;
+               u16 freq = phy->chandef->chan->center_freq;
                if (freq < 5100) {
                        paa_boost = 0xA;
                        pada_boost = 0x77;
@@ -1210,8 +1346,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
        u16 bw, len, rot, angle;
        struct b43_c32 *samples;
 
-
-       bw = (dev->phy.is_40mhz) ? 40 : 20;
+       bw = b43_is_40mhz(dev) ? 40 : 20;
        len = bw << 3;
 
        if (test) {
@@ -1220,7 +1355,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
                else
                        bw = 80;
 
-               if (dev->phy.is_40mhz)
+               if (b43_is_40mhz(dev))
                        bw <<= 1;
 
                len = bw << 1;
@@ -1248,7 +1383,8 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
 static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
-                                       u16 wait, bool iqmode, bool dac_test)
+                                u16 wait, bool iqmode, bool dac_test,
+                                bool modify_bbmult)
 {
        struct b43_phy_n *nphy = dev->phy.n;
        int i;
@@ -1262,12 +1398,10 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
                nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
        }
 
-       /* TODO: add modify_bbmult argument */
-       if (!dev->phy.is_40mhz)
-               tmp = 0x6464;
-       else
-               tmp = 0x4747;
-       b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+       if (modify_bbmult) {
+               tmp = !b43_is_40mhz(dev) ? 0x6464 : 0x4747;
+               b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+       }
 
        b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
 
@@ -1285,10 +1419,8 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
                b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
                b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
        } else {
-               if (dac_test)
-                       b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
-               else
-                       b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
+               tmp = dac_test ? 5 : 1;
+               b43_phy_write(dev, B43_NPHY_SAMP_CMD, tmp);
        }
        for (i = 0; i < 100; i++) {
                if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) {
@@ -1675,6 +1807,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
 static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 {
+       struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = dev->phy.n;
 
        u16 saved_regs_phy_rfctl[2];
@@ -1897,9 +2030,9 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 
        /* Remember for which channel we store configuration */
        if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
-               nphy->rssical_chanspec_2G.center_freq = dev->phy.channel_freq;
+               nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
        else
-               nphy->rssical_chanspec_5G.center_freq = dev->phy.channel_freq;
+               nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
 
        /* End of calibration, restore configuration */
        b43_nphy_classifier(dev, 7, class);
@@ -2192,7 +2325,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
        b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
 
-       if (!dev->phy.is_40mhz) {
+       if (!b43_is_40mhz(dev)) {
                /* Set dwell lengths */
                b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
                b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
@@ -2206,7 +2339,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
        b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
                        ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
 
-       if (!dev->phy.is_40mhz) {
+       if (!b43_is_40mhz(dev)) {
                b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
                        ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
                b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
@@ -2221,12 +2354,12 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
 
        if (nphy->gain_boost) {
                if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
-                       dev->phy.is_40mhz)
+                   b43_is_40mhz(dev))
                        code = 4;
                else
                        code = 5;
        } else {
-               code = dev->phy.is_40mhz ? 6 : 7;
+               code = b43_is_40mhz(dev) ? 6 : 7;
        }
 
        /* Set HPVGA2 index */
@@ -2298,7 +2431,7 @@ static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
 {
        if (!offset)
-               offset = (dev->phy.is_40mhz) ? 0x159 : 0x154;
+               offset = b43_is_40mhz(dev) ? 0x159 : 0x154;
        return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
 }
 
@@ -2371,13 +2504,13 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
        lpf_40 = b43_nphy_read_lpf_ctl(dev, 0x159);
        lpf_11b = b43_nphy_read_lpf_ctl(dev, 0x152);
        if (b43_nphy_ipa(dev)) {
-               if ((phy->radio_rev == 5 && phy->is_40mhz) ||
+               if ((phy->radio_rev == 5 && b43_is_40mhz(dev)) ||
                    phy->radio_rev == 7 || phy->radio_rev == 8) {
                        bcap_val = b43_radio_read(dev, 0x16b);
                        scap_val = b43_radio_read(dev, 0x16a);
                        scap_val_11b = scap_val;
                        bcap_val_11b = bcap_val;
-                       if (phy->radio_rev == 5 && phy->is_40mhz) {
+                       if (phy->radio_rev == 5 && b43_is_40mhz(dev)) {
                                scap_val_11n_20 = scap_val;
                                bcap_val_11n_20 = bcap_val;
                                scap_val_11n_40 = bcap_val_11n_40 = 0xc;
@@ -2519,7 +2652,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
                                        }
                                }
                        } else if (phy->radio_rev == 7 || phy->radio_rev == 8) {
-                               if (!phy->is_40mhz) {
+                               if (!b43_is_40mhz(dev)) {
                                        b43_radio_write(dev, 0x5F, 0x14);
                                        b43_radio_write(dev, 0xE8, 0x12);
                                } else {
@@ -2528,7 +2661,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
                                }
                        }
                } else {
-                       u16 freq = phy->channel_freq;
+                       u16 freq = phy->chandef->chan->center_freq;
                        if ((freq >= 5180 && freq <= 5230) ||
                            (freq >= 5745 && freq <= 5805)) {
                                b43_radio_write(dev, 0x7D, 0xFF);
@@ -2592,7 +2725,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
        b43_ntab_write(dev, B43_NTAB16(7, 0x123), 0x77);
        b43_ntab_write(dev, B43_NTAB16(7, 0x12A), 0x77);
 
-       if (!phy->is_40mhz) {
+       if (!b43_is_40mhz(dev)) {
                b43_ntab_write(dev, B43_NTAB32(16, 0x03), 0x18D);
                b43_ntab_write(dev, B43_NTAB32(16, 0x7F), 0x18D);
        } else {
@@ -2691,7 +2824,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
 
        b43_phy_maskset(dev, B43_NPHY_SGILTRNOFFSET, 0xF0FF, 0x0700);
 
-       if (!dev->phy.is_40mhz) {
+       if (!b43_is_40mhz(dev)) {
                b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
                b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
        } else {
@@ -2946,12 +3079,13 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
  * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
  */
 static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
-                               bool iqmode, bool dac_test)
+                           bool iqmode, bool dac_test, bool modify_bbmult)
 {
        u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
        if (samp == 0)
                return -1;
-       b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+       b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test,
+                            modify_bbmult);
        return 0;
 }
 
@@ -3114,7 +3248,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
                        b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
                                ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
 
-               if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+               if (dev->phy.rev < 2 && b43_is_40mhz(dev))
                        b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
        } else {
                b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
@@ -3168,7 +3302,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
                else if (dev->phy.rev < 2)
                        b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
 
-               if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+               if (dev->phy.rev < 2 && b43_is_40mhz(dev))
                        b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
 
                if (b43_nphy_ipa(dev)) {
@@ -3184,12 +3318,13 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
 static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
 {
+       struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = dev->phy.n;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
        u8 txpi[2], bbmult, i;
        u16 tmp, radio_gain, dac_gain;
-       u16 freq = dev->phy.channel_freq;
+       u16 freq = phy->chandef->chan->center_freq;
        u32 txgain;
        /* u32 gaintbl; rev3+ */
 
@@ -3234,7 +3369,11 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
        */
 
        for (i = 0; i < 2; i++) {
-               txgain = *(b43_nphy_get_tx_gain_table(dev) + txpi[i]);
+               const u32 *table = b43_nphy_get_tx_gain_table(dev);
+
+               if (!table)
+                       break;
+               txgain = *(table + txpi[i]);
 
                if (dev->phy.rev >= 3)
                        radio_gain = (txgain >> 16) & 0x1FFFF;
@@ -3388,7 +3527,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
                b43_nphy_rf_ctl_override(dev, 0x2000, 0, 3, false);
 
        b43_nphy_stop_playback(dev);
-       b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
+       b43_nphy_tx_tone(dev, 4000, 0, false, false, false);
        udelay(20);
        tmp = b43_nphy_poll_rssi(dev, N_RSSI_TSSI_2G, rssi, 1);
        b43_nphy_stop_playback(dev);
@@ -3439,21 +3578,21 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
                delta = 0;
                switch (stf_mode) {
                case 0:
-                       if (dev->phy.is_40mhz && dev->phy.rev >= 5) {
+                       if (b43_is_40mhz(dev) && dev->phy.rev >= 5) {
                                idx = 68;
                        } else {
                                delta = 1;
-                               idx = dev->phy.is_40mhz ? 52 : 4;
+                               idx = b43_is_40mhz(dev) ? 52 : 4;
                        }
                        break;
                case 1:
-                       idx = dev->phy.is_40mhz ? 76 : 28;
+                       idx = b43_is_40mhz(dev) ? 76 : 28;
                        break;
                case 2:
-                       idx = dev->phy.is_40mhz ? 84 : 36;
+                       idx = b43_is_40mhz(dev) ? 84 : 36;
                        break;
                case 3:
-                       idx = dev->phy.is_40mhz ? 92 : 44;
+                       idx = b43_is_40mhz(dev) ? 92 : 44;
                        break;
                }
 
@@ -3474,6 +3613,7 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
 static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
 {
+       struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = dev->phy.n;
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
@@ -3483,7 +3623,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
        s32 num, den, pwr;
        u32 regval[64];
 
-       u16 freq = dev->phy.channel_freq;
+       u16 freq = phy->chandef->chan->center_freq;
        u16 tmp;
        u16 r; /* routing */
        u8 i, c;
@@ -3647,6 +3787,9 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
        int i;
 
        table = b43_nphy_get_tx_gain_table(dev);
+       if (!table)
+               return;
+
        b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
        b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
 
@@ -3705,21 +3848,28 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
-static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+/*
+ * TX low-pass filter bandwidth setup
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
+ */
+static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev)
 {
        u16 tmp;
 
-       if (dev->phy.rev >= 3) {
-               if (b43_nphy_ipa(dev)) {
-                       tmp = 4;
-                       b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
-                             (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
-               }
+       if (dev->phy.rev < 3 || dev->phy.rev >= 7)
+               return;
 
-               tmp = 1;
+       if (b43_nphy_ipa(dev))
+               tmp = b43_is_40mhz(dev) ? 5 : 4;
+       else
+               tmp = b43_is_40mhz(dev) ? 3 : 1;
+       b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+                     (tmp << 9) | (tmp << 6) | (tmp << 3) | tmp);
+
+       if (b43_nphy_ipa(dev)) {
+               tmp = b43_is_40mhz(dev) ? 4 : 1;
                b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
-                             (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+                             (tmp << 9) | (tmp << 6) | (tmp << 3) | tmp);
        }
 }
 
@@ -3992,7 +4142,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
 
        if (nphy->gband_spurwar_en) {
                /* TODO: N PHY Adjust Analog Pfbw (7) */
-               if (channel == 11 && dev->phy.is_40mhz)
+               if (channel == 11 && b43_is_40mhz(dev))
                        ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
                else
                        ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
@@ -4286,7 +4436,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
                        b43_phy_write(dev, B43_PHY_N(offset[i] + j),
                                        tbl_tx_filter_coef_rev4[i][j]);
 
-       if (dev->phy.is_40mhz) {
+       if (b43_is_40mhz(dev)) {
                for (j = 0; j < 15; j++)
                        b43_phy_write(dev, B43_PHY_N(offset[0] + j),
                                        tbl_tx_filter_coef_rev4[3][j]);
@@ -4345,6 +4495,9 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
 
                for (i = 0; i < 2; ++i) {
                        table = b43_nphy_get_tx_gain_table(dev);
+                       if (!table)
+                               break;
+
                        if (dev->phy.rev >= 3) {
                                target.ipa[i] = (table[index[i]] >> 16) & 0xF;
                                target.pad[i] = (table[index[i]] >> 20) & 0xF;
@@ -4500,8 +4653,9 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
                txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
                txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
        }
-       iqcal_chanspec->center_freq = dev->phy.channel_freq;
-       iqcal_chanspec->channel_type = dev->phy.channel_type;
+       iqcal_chanspec->center_freq = dev->phy.chandef->chan->center_freq;
+       iqcal_chanspec->channel_type =
+                               cfg80211_get_chandef_type(dev->phy.chandef);
        b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table);
 
        if (nphy->hang_avoid)
@@ -4581,6 +4735,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
                                struct nphy_txgains target,
                                bool full, bool mphase)
 {
+       struct b43_phy *phy = &dev->phy;
        struct b43_phy_n *nphy = dev->phy.n;
        int i;
        int error = 0;
@@ -4621,7 +4776,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
                (dev->phy.rev == 5 && nphy->ipa2g_on &&
                b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
        if (phy6or5x) {
-               if (dev->phy.is_40mhz) {
+               if (b43_is_40mhz(dev)) {
                        b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
                                        tbl_tx_iqlo_cal_loft_ladder_40);
                        b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
@@ -4636,16 +4791,16 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
 
        b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
 
-       if (!dev->phy.is_40mhz)
+       if (!b43_is_40mhz(dev))
                freq = 2500;
        else
                freq = 5000;
 
        if (nphy->mphase_cal_phase_id > 2)
-               b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
-                                       0xFFFF, 0, true, false);
+               b43_nphy_run_samples(dev, (b43_is_40mhz(dev) ? 40 : 20) * 8,
+                                    0xFFFF, 0, true, false, false);
        else
-               error = b43_nphy_tx_tone(dev, freq, 250, true, false);
+               error = b43_nphy_tx_tone(dev, freq, 250, true, false, false);
 
        if (error == 0) {
                if (nphy->mphase_cal_phase_id > 2) {
@@ -4773,9 +4928,9 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
                                                nphy->txiqlocal_bestc);
                        nphy->txiqlocal_coeffsvalid = true;
                        nphy->txiqlocal_chanspec.center_freq =
-                                                       dev->phy.channel_freq;
+                                               phy->chandef->chan->center_freq;
                        nphy->txiqlocal_chanspec.channel_type =
-                                                       dev->phy.channel_type;
+                                       cfg80211_get_chandef_type(phy->chandef);
                } else {
                        length = 11;
                        if (dev->phy.rev < 3)
@@ -4811,8 +4966,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
        bool equal = true;
 
        if (!nphy->txiqlocal_coeffsvalid ||
-           nphy->txiqlocal_chanspec.center_freq != dev->phy.channel_freq ||
-           nphy->txiqlocal_chanspec.channel_type != dev->phy.channel_type)
+           nphy->txiqlocal_chanspec.center_freq != dev->phy.chandef->chan->center_freq ||
+           nphy->txiqlocal_chanspec.channel_type != cfg80211_get_chandef_type(dev->phy.chandef))
                return;
 
        b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -4968,11 +5123,11 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
                        if (playtone) {
                                ret = b43_nphy_tx_tone(dev, 4000,
                                                (nphy->rxcalparams & 0xFFFF),
-                                               false, false);
+                                               false, false, true);
                                playtone = false;
                        } else {
-                               b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
-                                                       false, false);
+                               b43_nphy_run_samples(dev, 160, 0xFFFF, 0, false,
+                                                    false, true);
                        }
 
                        if (ret == 0) {
@@ -5344,7 +5499,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
        b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
        if (phy->rev >= 3 && phy->rev <= 6)
                b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0032);
-       b43_nphy_tx_lp_fbw(dev);
+       b43_nphy_tx_lpf_bw(dev);
        if (phy->rev >= 3)
                b43_nphy_spur_workaround(dev);
 
@@ -5430,14 +5585,14 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
        if (dev->phy.rev < 3)
                b43_nphy_adjust_lna_gain_table(dev);
 
-       b43_nphy_tx_lp_fbw(dev);
+       b43_nphy_tx_lpf_bw(dev);
 
        if (dev->phy.rev >= 3 &&
            dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
                bool avoid = false;
                if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
                        avoid = true;
-               } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+               } else if (!b43_is_40mhz(dev)) {
                        if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
                                avoid = true;
                } else { /* 40MHz */
@@ -5484,10 +5639,17 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
 
        const struct b43_nphy_channeltab_entry_rev2 *tabent_r2 = NULL;
        const struct b43_nphy_channeltab_entry_rev3 *tabent_r3 = NULL;
+       const struct b43_nphy_chantabent_rev7 *tabent_r7 = NULL;
+       const struct b43_nphy_chantabent_rev7_2g *tabent_r7_2g = NULL;
 
        u8 tmp;
 
-       if (dev->phy.rev >= 3) {
+       if (phy->rev >= 7) {
+               r2057_get_chantabent_rev7(dev, channel->center_freq,
+                                         &tabent_r7, &tabent_r7_2g);
+               if (!tabent_r7 && !tabent_r7_2g)
+                       return -ESRCH;
+       } else if (phy->rev >= 3) {
                tabent_r3 = b43_nphy_get_chantabent_rev3(dev,
                                                        channel->center_freq);
                if (!tabent_r3)
@@ -5502,20 +5664,36 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
        /* Channel is set later in common code, but we need to set it on our
           own to let this function's subcalls work properly. */
        phy->channel = channel->hw_value;
-       phy->channel_freq = channel->center_freq;
 
+#if 0
        if (b43_channel_type_is_40mhz(phy->channel_type) !=
                b43_channel_type_is_40mhz(channel_type))
                ; /* TODO: BMAC BW Set (channel_type) */
+#endif
 
-       if (channel_type == NL80211_CHAN_HT40PLUS)
-               b43_phy_set(dev, B43_NPHY_RXCTL,
-                               B43_NPHY_RXCTL_BSELU20);
-       else if (channel_type == NL80211_CHAN_HT40MINUS)
-               b43_phy_mask(dev, B43_NPHY_RXCTL,
-                               ~B43_NPHY_RXCTL_BSELU20);
+       if (channel_type == NL80211_CHAN_HT40PLUS) {
+               b43_phy_set(dev, B43_NPHY_RXCTL, B43_NPHY_RXCTL_BSELU20);
+               if (phy->rev >= 7)
+                       b43_phy_set(dev, 0x310, 0x8000);
+       } else if (channel_type == NL80211_CHAN_HT40MINUS) {
+               b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20);
+               if (phy->rev >= 7)
+                       b43_phy_mask(dev, 0x310, (u16)~0x8000);
+       }
 
-       if (dev->phy.rev >= 3) {
+       if (phy->rev >= 7) {
+               const struct b43_phy_n_sfo_cfg *phy_regs = tabent_r7 ?
+                       &(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
+
+               if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
+                       tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0;
+                       b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
+                       b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
+               }
+
+               b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
+               b43_nphy_channel_setup(dev, phy_regs, channel);
+       } else if (phy->rev >= 3) {
                tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
                b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
                b43_radio_2056_setup(dev, tabent_r3);
index d61d6830c5c77329abbd48aa79631307d0e28bc8..df3574545819d7f34249f59206ef0d3b1f40a473 100644 (file)
@@ -26,7 +26,7 @@
 #include "radio_2057.h"
 #include "phy_common.h"
 
-static u16 r2057_rev4_init[42][2] = {
+static u16 r2057_rev4_init[][2] = {
        { 0x0E, 0x20 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 },
        { 0x35, 0x26 }, { 0x3C, 0xff }, { 0x3D, 0xff }, { 0x3E, 0xff },
        { 0x3F, 0xff }, { 0x62, 0x33 }, { 0x8A, 0xf0 }, { 0x8B, 0x10 },
@@ -40,7 +40,7 @@ static u16 r2057_rev4_init[42][2] = {
        { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
 };
 
-static u16 r2057_rev5_init[44][2] = {
+static u16 r2057_rev5_init[][2] = {
        { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
        { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
        { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
@@ -54,7 +54,7 @@ static u16 r2057_rev5_init[44][2] = {
        { 0x1AC, 0x00 }, { 0x1B7, 0x0c }, { 0x1C1, 0x01 }, { 0x1C2, 0x80 },
 };
 
-static u16 r2057_rev5a_init[45][2] = {
+static u16 r2057_rev5a_init[][2] = {
        { 0x00, 0x15 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x23, 0x6 },
        { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 },
        { 0x59, 0x88 }, { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f },
@@ -69,7 +69,7 @@ static u16 r2057_rev5a_init[45][2] = {
        { 0x1C2, 0x80 },
 };
 
-static u16 r2057_rev7_init[54][2] = {
+static u16 r2057_rev7_init[][2] = {
        { 0x00, 0x00 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
        { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
        { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x13 },
@@ -86,7 +86,8 @@ static u16 r2057_rev7_init[54][2] = {
        { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
 };
 
-static u16 r2057_rev8_init[54][2] = {
+/* TODO: Which devices should use it?
+static u16 r2057_rev8_init[][2] = {
        { 0x00, 0x08 }, { 0x01, 0x57 }, { 0x02, 0x20 }, { 0x31, 0x00 },
        { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x51, 0x70 }, { 0x59, 0x88 },
        { 0x5C, 0x20 }, { 0x62, 0x33 }, { 0x63, 0x0f }, { 0x64, 0x0f },
@@ -102,6 +103,47 @@ static u16 r2057_rev8_init[54][2] = {
        { 0x1A6, 0x00 }, { 0x1AA, 0x00 }, { 0x1AB, 0x00 }, { 0x1AC, 0x00 },
        { 0x1B7, 0x05 }, { 0x1C2, 0xa0 },
 };
+*/
+
+#define RADIOREGS7(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
+                  r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
+                  r20, r21, r22, r23, r24, r25, r26, r27) \
+       .radio_vcocal_countval0                 = r00,  \
+       .radio_vcocal_countval1                 = r01,  \
+       .radio_rfpll_refmaster_sparextalsize    = r02,  \
+       .radio_rfpll_loopfilter_r1              = r03,  \
+       .radio_rfpll_loopfilter_c2              = r04,  \
+       .radio_rfpll_loopfilter_c1              = r05,  \
+       .radio_cp_kpd_idac                      = r06,  \
+       .radio_rfpll_mmd0                       = r07,  \
+       .radio_rfpll_mmd1                       = r08,  \
+       .radio_vcobuf_tune                      = r09,  \
+       .radio_logen_mx2g_tune                  = r10,  \
+       .radio_logen_mx5g_tune                  = r11,  \
+       .radio_logen_indbuf2g_tune              = r12,  \
+       .radio_logen_indbuf5g_tune              = r13,  \
+       .radio_txmix2g_tune_boost_pu_core0      = r14,  \
+       .radio_pad2g_tune_pus_core0             = r15,  \
+       .radio_pga_boost_tune_core0             = r16,  \
+       .radio_txmix5g_boost_tune_core0         = r17,  \
+       .radio_pad5g_tune_misc_pus_core0        = r18,  \
+       .radio_lna2g_tune_core0                 = r19,  \
+       .radio_lna5g_tune_core0                 = r20,  \
+       .radio_txmix2g_tune_boost_pu_core1      = r21,  \
+       .radio_pad2g_tune_pus_core1             = r22,  \
+       .radio_pga_boost_tune_core1             = r23,  \
+       .radio_txmix5g_boost_tune_core1         = r24,  \
+       .radio_pad5g_tune_misc_pus_core1        = r25,  \
+       .radio_lna2g_tune_core1                 = r26,  \
+       .radio_lna5g_tune_core1                 = r27
+
+#define PHYREGS(r0, r1, r2, r3, r4, r5)        \
+       .phy_regs.phy_bw1a      = r0,   \
+       .phy_regs.phy_bw2       = r1,   \
+       .phy_regs.phy_bw3       = r2,   \
+       .phy_regs.phy_bw4       = r3,   \
+       .phy_regs.phy_bw5       = r4,   \
+       .phy_regs.phy_bw6       = r5
 
 void r2057_upload_inittabs(struct b43_wldev *dev)
 {
@@ -109,33 +151,69 @@ void r2057_upload_inittabs(struct b43_wldev *dev)
        u16 *table = NULL;
        u16 size, i;
 
-       if (phy->rev == 7) {
+       switch (phy->rev) {
+       case 7:
                table = r2057_rev4_init[0];
                size = ARRAY_SIZE(r2057_rev4_init);
-       } else if (phy->rev == 8 || phy->rev == 9) {
+               break;
+       case 8:
                if (phy->radio_rev == 5) {
-                       if (phy->radio_rev == 8) {
-                               table = r2057_rev5_init[0];
-                               size = ARRAY_SIZE(r2057_rev5_init);
-                       } else {
-                               table = r2057_rev5a_init[0];
-                               size = ARRAY_SIZE(r2057_rev5a_init);
-                       }
+                       table = r2057_rev5_init[0];
+                       size = ARRAY_SIZE(r2057_rev5_init);
                } else if (phy->radio_rev == 7) {
                        table = r2057_rev7_init[0];
                        size = ARRAY_SIZE(r2057_rev7_init);
-               } else if (phy->radio_rev == 9) {
-                       table = r2057_rev8_init[0];
-                       size = ARRAY_SIZE(r2057_rev8_init);
                }
+               break;
+       case 9:
+               if (phy->radio_rev == 5) {
+                       table = r2057_rev5a_init[0];
+                       size = ARRAY_SIZE(r2057_rev5a_init);
+               }
+               break;
        }
 
+       B43_WARN_ON(!table);
+
        if (table) {
-               for (i = 0; i < 10; i++) {
-                       pr_info("radio_write 0x%X ", *table);
-                       table++;
-                       pr_info("0x%X\n", *table);
-                       table++;
+               for (i = 0; i < size; i++, table += 2)
+                       b43_radio_write(dev, table[0], table[1]);
+       }
+}
+
+void r2057_get_chantabent_rev7(struct b43_wldev *dev, u16 freq,
+                              const struct b43_nphy_chantabent_rev7 **tabent_r7,
+                              const struct b43_nphy_chantabent_rev7_2g **tabent_r7_2g)
+{
+       struct b43_phy *phy = &dev->phy;
+       const struct b43_nphy_chantabent_rev7 *e_r7 = NULL;
+       const struct b43_nphy_chantabent_rev7_2g *e_r7_2g = NULL;
+       unsigned int len, i;
+
+       *tabent_r7 = NULL;
+       *tabent_r7_2g = NULL;
+
+       /* TODO */
+       switch (phy->rev) {
+       default:
+               break;
+       }
+
+       if (e_r7) {
+               for (i = 0; i < len; i++, e_r7++) {
+                       if (e_r7->freq == freq) {
+                               *tabent_r7 = e_r7;
+                               return;
+                       }
+               }
+       } else if (e_r7_2g) {
+               for (i = 0; i < len; i++, e_r7_2g++) {
+                       if (e_r7_2g->freq == freq) {
+                               *tabent_r7_2g = e_r7_2g;
+                               return;
+                       }
                }
+       } else {
+               B43_WARN_ON(1);
        }
 }
index eeebd8fbeb0db19575aad27bba58e5a1763f6a8c..675d1bb64429d432762e50af57daed10a38d1d31 100644 (file)
 
 #define R2057_VCM_MASK                         0x7
 
+struct b43_nphy_chantabent_rev7 {
+       /* The channel frequency in MHz */
+       u16 freq;
+       /* Radio regs values on channelswitch */
+       u8 radio_vcocal_countval0;
+       u8 radio_vcocal_countval1;
+       u8 radio_rfpll_refmaster_sparextalsize;
+       u8 radio_rfpll_loopfilter_r1;
+       u8 radio_rfpll_loopfilter_c2;
+       u8 radio_rfpll_loopfilter_c1;
+       u8 radio_cp_kpd_idac;
+       u8 radio_rfpll_mmd0;
+       u8 radio_rfpll_mmd1;
+       u8 radio_vcobuf_tune;
+       u8 radio_logen_mx2g_tune;
+       u8 radio_logen_mx5g_tune;
+       u8 radio_logen_indbuf2g_tune;
+       u8 radio_logen_indbuf5g_tune;
+       u8 radio_txmix2g_tune_boost_pu_core0;
+       u8 radio_pad2g_tune_pus_core0;
+       u8 radio_pga_boost_tune_core0;
+       u8 radio_txmix5g_boost_tune_core0;
+       u8 radio_pad5g_tune_misc_pus_core0;
+       u8 radio_lna2g_tune_core0;
+       u8 radio_lna5g_tune_core0;
+       u8 radio_txmix2g_tune_boost_pu_core1;
+       u8 radio_pad2g_tune_pus_core1;
+       u8 radio_pga_boost_tune_core1;
+       u8 radio_txmix5g_boost_tune_core1;
+       u8 radio_pad5g_tune_misc_pus_core1;
+       u8 radio_lna2g_tune_core1;
+       u8 radio_lna5g_tune_core1;
+       /* PHY regs values on channelswitch */
+       struct b43_phy_n_sfo_cfg phy_regs;
+};
+
+struct b43_nphy_chantabent_rev7_2g {
+       /* The channel frequency in MHz */
+       u16 freq;
+       /* Radio regs values on channelswitch */
+       u8 radio_vcocal_countval0;
+       u8 radio_vcocal_countval1;
+       u8 radio_rfpll_refmaster_sparextalsize;
+       u8 radio_rfpll_loopfilter_r1;
+       u8 radio_rfpll_loopfilter_c2;
+       u8 radio_rfpll_loopfilter_c1;
+       u8 radio_cp_kpd_idac;
+       u8 radio_rfpll_mmd0;
+       u8 radio_rfpll_mmd1;
+       u8 radio_vcobuf_tune;
+       u8 radio_logen_mx2g_tune;
+       u8 radio_logen_indbuf2g_tune;
+       u8 radio_txmix2g_tune_boost_pu_core0;
+       u8 radio_pad2g_tune_pus_core0;
+       u8 radio_lna2g_tune_core0;
+       u8 radio_txmix2g_tune_boost_pu_core1;
+       u8 radio_pad2g_tune_pus_core1;
+       u8 radio_lna2g_tune_core1;
+       /* PHY regs values on channelswitch */
+       struct b43_phy_n_sfo_cfg phy_regs;
+};
+
 void r2057_upload_inittabs(struct b43_wldev *dev);
 
+void r2057_get_chantabent_rev7(struct b43_wldev *dev, u16 freq,
+                              const struct b43_nphy_chantabent_rev7 **tabent_r7,
+                              const struct b43_nphy_chantabent_rev7_2g **tabent_r7_2g);
+
 #endif /* B43_RADIO_2057_H_ */
index 4047c05e380759f4f0b64bbdc5188f04efcce242..b28dce950e1f864c93f30e5cafe75e095410600f 100644 (file)
@@ -2146,7 +2146,196 @@ static const u16 b43_ntab_antswctl_r3[4][32] = {
        }
 };
 
-/* TX gain tables */
+/* static tables, PHY revision >= 7 */
+
+/* Copied from brcmsmac (5.75.11) */
+static const u32 b43_ntab_tmap_r7[] = {
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+       0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00011111,
+       0x11110000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa,
+       0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88,
+       0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888,
+       0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808,
+       0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08,
+       0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080,
+       0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x22000000, 0x2222b222, 0x22222222, 0x222222b2,
+       0xb2222220, 0x22222222, 0x22d22222, 0x00000222,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x33000000, 0x3333b333, 0x33333333, 0x333333b3,
+       0xb3333330, 0x33333333, 0x33d33333, 0x00000333,
+       0x22000000, 0x2222a222, 0x22222222, 0x222222a2,
+       0xa2222220, 0x22222222, 0x22c22222, 0x00000222,
+       0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x22222200, 0x2222f222, 0x22222222, 0x222222f2,
+       0x22222222, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x11111111,
+       0xf1111111, 0x11111111, 0x11f11111, 0x01111111,
+       0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b,
+       0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa,
+       0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a,
+       0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb,
+       0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999,
+       0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88,
+       0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a,
+       0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b,
+       0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909,
+       0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08,
+       0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a,
+       0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090,
+       0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090,
+       0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080,
+       0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x11000000, 0x1111f111, 0x11111111, 0x111111f1,
+       0xf1111110, 0x11111111, 0x11f11111, 0x00000111,
+       0x33000000, 0x3333f333, 0x33333333, 0x333333f3,
+       0xf3333330, 0x33333333, 0x33f33333, 0x00000333,
+       0x22000000, 0x2222f222, 0x22222222, 0x222222f2,
+       0xf2222220, 0x22222222, 0x22f22222, 0x00000222,
+       0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9,
+       0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888,
+       0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888,
+       0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x11000000, 0x1111a111, 0x11111111, 0x111111a1,
+       0xa1111110, 0x11111111, 0x11c11111, 0x00000111,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8,
+       0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Extracted from MMIO dump of 6.30.223.141 */
+static const u32 b43_ntab_noisevar_r7[] = {
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+       0x020c020c, 0x0000014d, 0x020c020c, 0x0000014d,
+};
+
+/**************************************************
+ * TX gain tables
+ **************************************************/
+
 static const u32 b43_ntab_tx_gain_rev0_1_2[] = {
        0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
        0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
@@ -2182,7 +2371,9 @@ static const u32 b43_ntab_tx_gain_rev0_1_2[] = {
        0x03801442, 0x03801344, 0x03801342, 0x00002b00,
 };
 
-static const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+/* EPA 2 GHz */
+
+static const u32 b43_ntab_tx_gain_epa_rev3_2g[] = {
        0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
        0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
        0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
@@ -2217,7 +2408,9 @@ static const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
        0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
 };
 
-static const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+/* EPA 5 GHz */
+
+static const u32 b43_ntab_tx_gain_epa_rev3_5g[] = {
        0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
        0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
        0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
@@ -2252,7 +2445,7 @@ static const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
        0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
 };
 
-static const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+static const u32 b43_ntab_tx_gain_epa_rev4_5g[] = {
        0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
        0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
        0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
@@ -2287,7 +2480,7 @@ static const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
        0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
 };
 
-static const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+static const u32 b43_ntab_tx_gain_epa_rev5_5g[] = {
        0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
        0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
        0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
@@ -2322,7 +2515,9 @@ static const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
        0x0062003b, 0x00620039, 0x00620037, 0x00620035,
 };
 
-static const u32 txpwrctrl_tx_gain_ipa[] = {
+/* IPA 2 GHz */
+
+static const u32 b43_ntab_tx_gain_ipa_rev3_2g[] = {
        0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
        0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
        0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
@@ -2357,7 +2552,7 @@ static const u32 txpwrctrl_tx_gain_ipa[] = {
        0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
 };
 
-static const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+static const u32 b43_ntab_tx_gain_ipa_rev5_2g[] = {
        0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
        0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
        0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
@@ -2392,7 +2587,7 @@ static const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
        0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
 };
 
-static const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+static const u32 b43_ntab_tx_gain_ipa_rev6_2g[] = {
        0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
        0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
        0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
@@ -2427,7 +2622,45 @@ static const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
        0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
 };
 
-static const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+/* Extracted from MMIO dump of 6.30.223.141 */
+static const u32 b43_ntab_tx_gain_ipa_2057_rev9_2g[] = {
+       0x60ff0031, 0x60e7002c, 0x60cf002a, 0x60c70029,
+       0x60b70029, 0x60a70029, 0x609f002a, 0x6097002b,
+       0x6087002e, 0x60770031, 0x606f0032, 0x60670034,
+       0x60670031, 0x605f0033, 0x605f0031, 0x60570033,
+       0x60570030, 0x6057002d, 0x6057002b, 0x604f002d,
+       0x604f002b, 0x604f0029, 0x604f0026, 0x60470029,
+       0x60470027, 0x603f0029, 0x603f0027, 0x603f0025,
+       0x60370029, 0x60370027, 0x60370024, 0x602f002a,
+       0x602f0028, 0x602f0026, 0x602f0024, 0x6027002a,
+       0x60270028, 0x60270026, 0x60270024, 0x60270022,
+       0x601f002b, 0x601f0029, 0x601f0027, 0x601f0024,
+       0x601f0022, 0x601f0020, 0x601f001f, 0x601f001d,
+       0x60170029, 0x60170027, 0x60170025, 0x60170023,
+       0x60170021, 0x6017001f, 0x6017001d, 0x6017001c,
+       0x6017001a, 0x60170018, 0x60170018, 0x60170016,
+       0x60170015, 0x600f0029, 0x600f0027, 0x600f0025,
+       0x600f0023, 0x600f0021, 0x600f001f, 0x600f001d,
+       0x600f001c, 0x600f001a, 0x600f0019, 0x600f0018,
+       0x600f0016, 0x600f0015, 0x600f0115, 0x600f0215,
+       0x600f0315, 0x600f0415, 0x600f0515, 0x600f0615,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+       0x600f0715, 0x600f0715, 0x600f0715, 0x600f0715,
+};
+
+/* IPA 5 GHz */
+
+static const u32 b43_ntab_tx_gain_ipa_rev3_5g[] = {
        0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
        0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
        0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
@@ -2462,6 +2695,42 @@ static const u32 txpwrctrl_tx_gain_ipa_5g[] = {
        0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
 };
 
+/* Extracted from MMIO dump of 6.30.223.141 */
+static const u32 b43_ntab_tx_gain_ipa_2057_rev9_5g[] = {
+       0x7f7f0053, 0x7f7f004b, 0x7f7f0044, 0x7f7f003f,
+       0x7f7f0039, 0x7f7f0035, 0x7f7f0032, 0x7f7f0030,
+       0x7f7f002d, 0x7e7f0030, 0x7e7f002d, 0x7d7f0032,
+       0x7d7f002f, 0x7d7f002c, 0x7c7f0032, 0x7c7f0030,
+       0x7c7f002d, 0x7b7f0030, 0x7b7f002e, 0x7b7f002b,
+       0x7a7f0032, 0x7a7f0030, 0x7a7f002d, 0x7a7f002b,
+       0x797f0030, 0x797f002e, 0x797f002b, 0x797f0029,
+       0x787f0030, 0x787f002d, 0x787f002b, 0x777f0032,
+       0x777f0030, 0x777f002d, 0x777f002b, 0x767f0031,
+       0x767f002f, 0x767f002c, 0x767f002a, 0x757f0031,
+       0x757f002f, 0x757f002c, 0x757f002a, 0x747f0030,
+       0x747f002d, 0x747f002b, 0x737f0032, 0x737f002f,
+       0x737f002c, 0x737f002a, 0x727f0030, 0x727f002d,
+       0x727f002b, 0x727f0029, 0x717f0030, 0x717f002d,
+       0x717f002b, 0x707f0031, 0x707f002f, 0x707f002c,
+       0x707f002a, 0x707f0027, 0x707f0025, 0x707f0023,
+       0x707f0021, 0x707f001f, 0x707f001d, 0x707f001c,
+       0x707f001a, 0x707f0019, 0x707f0017, 0x707f0016,
+       0x707f0015, 0x707f0014, 0x707f0012, 0x707f0012,
+       0x707f0011, 0x707f0010, 0x707f000f, 0x707f000e,
+       0x707f000d, 0x707f000d, 0x707f000c, 0x707f000b,
+       0x707f000a, 0x707f000a, 0x707f0009, 0x707f0008,
+       0x707f0008, 0x707f0008, 0x707f0008, 0x707f0007,
+       0x707f0007, 0x707f0006, 0x707f0006, 0x707f0006,
+       0x707f0005, 0x707f0005, 0x707f0005, 0x707f0004,
+       0x707f0004, 0x707f0004, 0x707f0003, 0x707f0003,
+       0x707f0003, 0x707f0003, 0x707f0003, 0x707f0003,
+       0x707f0003, 0x707f0003, 0x707f0003, 0x707f0003,
+       0x707f0002, 0x707f0002, 0x707f0002, 0x707f0002,
+       0x707f0002, 0x707f0002, 0x707f0002, 0x707f0002,
+       0x707f0002, 0x707f0001, 0x707f0001, 0x707f0001,
+       0x707f0001, 0x707f0001, 0x707f0001, 0x707f0001,
+};
+
 const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[] = {
        -114, -108, -98, -91, -84, -78, -70, -62,
        -54, -46, -39, -31, -23, -15, -8, 0
@@ -3031,6 +3300,91 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
                b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
        } while (0)
 
+static void b43_nphy_tables_init_shared_lut(struct b43_wldev *dev)
+{
+       ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
+       ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
+       ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
+       ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
+       ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
+       ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
+       ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
+       ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
+       ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
+       ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+}
+
+static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev)
+{
+       struct ssb_sprom *sprom = dev->dev->bus_sprom;
+       u8 antswlut;
+       int core, offset, i;
+
+       const int antswlut0_offsets[] = { 0, 4, 8, }; /* Offsets for values */
+       const u8 antswlut0_values[][3] = {
+               { 0x2, 0x12, 0x8 }, /* Core 0 */
+               { 0x2, 0x18, 0x2 }, /* Core 1 */
+       };
+
+       if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+               antswlut = sprom->fem.ghz5.antswlut;
+       else
+               antswlut = sprom->fem.ghz2.antswlut;
+
+       switch (antswlut) {
+       case 0:
+               for (core = 0; core < 2; core++) {
+                       for (i = 0; i < ARRAY_SIZE(antswlut0_values[0]); i++) {
+                               offset = core ? 0x20 : 0x00;
+                               offset += antswlut0_offsets[i];
+                               b43_ntab_write(dev, B43_NTAB8(9, offset),
+                                              antswlut0_values[core][i]);
+                       }
+               }
+               break;
+       default:
+               b43err(dev->wl, "Unsupported antswlut: %d\n", antswlut);
+               break;
+       }
+}
+
+static void b43_nphy_tables_init_rev16(struct b43_wldev *dev)
+{
+       /* Static tables */
+       if (dev->phy.do_full_init) {
+               ntab_upload(dev, B43_NTAB_NOISEVAR_R7, b43_ntab_noisevar_r7);
+               b43_nphy_tables_init_shared_lut(dev);
+       }
+
+       /* Volatile tables */
+       b43_nphy_tables_init_rev7_volatile(dev);
+}
+
+static void b43_nphy_tables_init_rev7(struct b43_wldev *dev)
+{
+       /* Static tables */
+       if (dev->phy.do_full_init) {
+               ntab_upload(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
+               ntab_upload(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
+               ntab_upload(dev, B43_NTAB_TMAP_R7, b43_ntab_tmap_r7);
+               ntab_upload(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
+               ntab_upload(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
+               ntab_upload(dev, B43_NTAB_NOISEVAR_R7, b43_ntab_noisevar_r7);
+               ntab_upload(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
+               ntab_upload(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
+               ntab_upload(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
+               ntab_upload(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
+               ntab_upload(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
+               ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
+               ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
+               ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
+               b43_nphy_tables_init_shared_lut(dev);
+       }
+
+       /* Volatile tables */
+       b43_nphy_tables_init_rev7_volatile(dev);
+}
+
 static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
 {
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -3057,16 +3411,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
                ntab_upload(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
                ntab_upload(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
                ntab_upload(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
-               ntab_upload(dev, B43_NTAB_C0_ESTPLT_R3, b43_ntab_estimatepowerlt0_r3);
-               ntab_upload(dev, B43_NTAB_C1_ESTPLT_R3, b43_ntab_estimatepowerlt1_r3);
-               ntab_upload(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
-               ntab_upload(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
-               ntab_upload(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
-               ntab_upload(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
-               ntab_upload(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
-               ntab_upload(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
-               ntab_upload(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
-               ntab_upload(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
+               b43_nphy_tables_init_shared_lut(dev);
        }
 
        /* Volatile tables */
@@ -3115,7 +3460,11 @@ static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
 void b43_nphy_tables_init(struct b43_wldev *dev)
 {
-       if (dev->phy.rev >= 3)
+       if (dev->phy.rev >= 16)
+               b43_nphy_tables_init_rev16(dev);
+       else if (dev->phy.rev >= 7)
+               b43_nphy_tables_init_rev7(dev);
+       else if (dev->phy.rev >= 3)
                b43_nphy_tables_init_rev3(dev);
        else
                b43_nphy_tables_init_rev0(dev);
@@ -3124,23 +3473,45 @@ void b43_nphy_tables_init(struct b43_wldev *dev)
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
 static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
 {
+       struct b43_phy *phy = &dev->phy;
+
        if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
-               if (dev->phy.rev >= 6) {
-                       if (dev->dev->chip_id == 47162)
-                               return txpwrctrl_tx_gain_ipa_rev5;
-                       return txpwrctrl_tx_gain_ipa_rev6;
-               } else if (dev->phy.rev >= 5) {
-                       return txpwrctrl_tx_gain_ipa_rev5;
-               } else {
-                       return txpwrctrl_tx_gain_ipa;
+               switch (phy->rev) {
+               case 16:
+                       if (phy->radio_rev == 9)
+                               return b43_ntab_tx_gain_ipa_2057_rev9_2g;
+               case 6:
+                       if (dev->dev->chip_id == BCMA_CHIP_ID_BCM47162)
+                               return b43_ntab_tx_gain_ipa_rev5_2g;
+                       return b43_ntab_tx_gain_ipa_rev6_2g;
+               case 5:
+                       return b43_ntab_tx_gain_ipa_rev5_2g;
+               case 4:
+               case 3:
+                       return b43_ntab_tx_gain_ipa_rev3_2g;
+               default:
+                       b43err(dev->wl,
+                              "No 2GHz IPA gain table available for this device\n");
+                       return NULL;
                }
        } else {
-               return txpwrctrl_tx_gain_ipa_5g;
+               switch (phy->rev) {
+               case 16:
+                       if (phy->radio_rev == 9)
+                               return b43_ntab_tx_gain_ipa_2057_rev9_5g;
+               case 3 ... 6:
+                       return b43_ntab_tx_gain_ipa_rev3_5g;
+               default:
+                       b43err(dev->wl,
+                              "No 5GHz IPA gain table available for this device\n");
+                       return NULL;
+               }
        }
 }
 
 const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
 {
+       struct b43_phy *phy = &dev->phy;
        enum ieee80211_band band = b43_current_band(dev->wl);
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
 
@@ -3152,19 +3523,36 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
            (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
                return b43_nphy_get_ipa_gain_table(dev);
        } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
-               if (dev->phy.rev == 3)
-                       return b43_ntab_tx_gain_rev3_5ghz;
-               if (dev->phy.rev == 4)
+               switch (phy->rev) {
+               case 6:
+               case 5:
+                       return b43_ntab_tx_gain_epa_rev5_5g;
+               case 4:
                        return sprom->fem.ghz5.extpa_gain == 3 ?
-                               b43_ntab_tx_gain_rev4_5ghz :
-                               b43_ntab_tx_gain_rev4_5ghz; /* FIXME */
-               else
-                       return b43_ntab_tx_gain_rev5plus_5ghz;
+                               b43_ntab_tx_gain_epa_rev4_5g :
+                               b43_ntab_tx_gain_epa_rev4_5g; /* FIXME */
+               case 3:
+                       return b43_ntab_tx_gain_epa_rev3_5g;
+               default:
+                       b43err(dev->wl,
+                              "No 5GHz EPA gain table available for this device\n");
+                       return NULL;
+               }
        } else {
-               if (dev->phy.rev >= 5 && sprom->fem.ghz5.extpa_gain == 3)
-                       return b43_ntab_tx_gain_rev3plus_2ghz; /* FIXME */
-               else
-                       return b43_ntab_tx_gain_rev3plus_2ghz;
+               switch (phy->rev) {
+               case 6:
+               case 5:
+                       if (sprom->fem.ghz5.extpa_gain == 3)
+                               return b43_ntab_tx_gain_epa_rev3_2g; /* FIXME */
+                       /* fall through */
+               case 4:
+               case 3:
+                       return b43_ntab_tx_gain_epa_rev3_2g;
+               default:
+                       b43err(dev->wl,
+                              "No 2GHz EPA gain table available for this device\n");
+                       return NULL;
+               }
        }
 }
 
@@ -3191,7 +3579,7 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
        /* Some workarounds to the workarounds... */
        if (ghz5 && dev->phy.rev >= 6) {
                if (dev->phy.radio_rev == 11 &&
-                   !b43_channel_type_is_40mhz(dev->phy.channel_type))
+                   !b43_is_40mhz(dev))
                        e->cliplo_gain = 0x2d;
        } else if (!ghz5 && dev->phy.rev >= 5) {
                static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a,
index 3a58aee4c4cf714aa72bc22a8c85670b135eb934..3ce2e6f3a2781d168cf0955f6246b24a1d5d1181 100644 (file)
@@ -165,6 +165,10 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
 #define B43_NTAB_C1_LOFEEDTH_R3                B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */
 #define B43_NTAB_C1_PAPD_COMP_R3       B43_NTAB16(27, 576)
 
+/* Static N-PHY tables, PHY revision >= 7 */
+#define B43_NTAB_TMAP_R7               B43_NTAB32(12,   0) /* TM AP */
+#define B43_NTAB_NOISEVAR_R7           B43_NTAB32(16,   0) /* noise variance */
+
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE       18
 #define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE      18
index 98e67c18f276471d804e751122ae63c2f26662bf..4cffb2ee36738106d5543c4687f8406f4b3e1847 100644 (file)
@@ -34,7 +34,8 @@ brcmfmac-objs += \
                dhd_common.o \
                dhd_linux.o \
                firmware.o \
-               btcoex.o
+               btcoex.o \
+               vendor.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
                dhd_sdio.o \
                bcmsdh.o
index 0cb591b050b3c726a0657ec55043c3c2648b4e9d..a29ac4977b3a12e009f2947c09bd40d3b604ccde 100644 (file)
@@ -157,7 +157,7 @@ static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
                 */
 
                /* save current */
-               brcmf_dbg(TRACE, "new SCO/eSCO coex algo {save & override}\n");
+               brcmf_dbg(INFO, "new SCO/eSCO coex algo {save & override}\n");
                brcmf_btcoex_params_read(ifp, 50, &btci->reg50);
                brcmf_btcoex_params_read(ifp, 51, &btci->reg51);
                brcmf_btcoex_params_read(ifp, 64, &btci->reg64);
@@ -165,7 +165,7 @@ static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
                brcmf_btcoex_params_read(ifp, 71, &btci->reg71);
 
                btci->saved_regs_part2 = true;
-               brcmf_dbg(TRACE,
+               brcmf_dbg(INFO,
                          "saved bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                          btci->reg50, btci->reg51, btci->reg64,
                          btci->reg65, btci->reg71);
@@ -179,21 +179,21 @@ static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
 
        } else if (btci->saved_regs_part2) {
                /* restore previously saved bt params */
-               brcmf_dbg(TRACE, "Do new SCO/eSCO coex algo {restore}\n");
+               brcmf_dbg(INFO, "Do new SCO/eSCO coex algo {restore}\n");
                brcmf_btcoex_params_write(ifp, 50, btci->reg50);
                brcmf_btcoex_params_write(ifp, 51, btci->reg51);
                brcmf_btcoex_params_write(ifp, 64, btci->reg64);
                brcmf_btcoex_params_write(ifp, 65, btci->reg65);
                brcmf_btcoex_params_write(ifp, 71, btci->reg71);
 
-               brcmf_dbg(TRACE,
+               brcmf_dbg(INFO,
                          "restored bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                          btci->reg50, btci->reg51, btci->reg64,
                          btci->reg65, btci->reg71);
 
                btci->saved_regs_part2 = false;
        } else {
-               brcmf_err("attempted to restore not saved BTCOEX params\n");
+               brcmf_dbg(INFO, "attempted to restore not saved BTCOEX params\n");
        }
 }
 
@@ -219,14 +219,14 @@ static bool brcmf_btcoex_is_sco_active(struct brcmf_if *ifp)
                        break;
                }
 
-               brcmf_dbg(TRACE, "sample[%d], btc_params 27:%x\n", i, param27);
+               brcmf_dbg(INFO, "sample[%d], btc_params 27:%x\n", i, param27);
 
                if ((param27 & 0x6) == 2) { /* count both sco & esco  */
                        sco_id_cnt++;
                }
 
                if (sco_id_cnt > 2) {
-                       brcmf_dbg(TRACE,
+                       brcmf_dbg(INFO,
                                  "sco/esco detected, pkt id_cnt:%d samples:%d\n",
                                  sco_id_cnt, i);
                        res = true;
@@ -250,7 +250,7 @@ static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
                brcmf_btcoex_params_read(ifp, 41, &btci->reg41);
                brcmf_btcoex_params_read(ifp, 68, &btci->reg68);
                btci->saved_regs_part1 = true;
-               brcmf_dbg(TRACE,
+               brcmf_dbg(INFO,
                          "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n",
                          btci->reg66, btci->reg41,
                          btci->reg68);
@@ -270,7 +270,7 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
                brcmf_btcoex_params_write(ifp, 66, btci->reg66);
                brcmf_btcoex_params_write(ifp, 41, btci->reg41);
                brcmf_btcoex_params_write(ifp, 68, btci->reg68);
-               brcmf_dbg(TRACE,
+               brcmf_dbg(INFO,
                          "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n",
                          btci->reg66, btci->reg41,
                          btci->reg68);
@@ -307,7 +307,7 @@ static void brcmf_btcoex_handler(struct work_struct *work)
                /* DHCP started provide OPPORTUNITY window
                   to get DHCP address
                */
-               brcmf_dbg(TRACE, "DHCP started\n");
+               brcmf_dbg(INFO, "DHCP started\n");
                btci->bt_state = BRCMF_BT_DHCP_OPPR_WIN;
                if (btci->timeout < BRCMF_BTCOEX_OPPR_WIN_TIME) {
                        mod_timer(&btci->timer, btci->timer.expires);
@@ -322,12 +322,12 @@ static void brcmf_btcoex_handler(struct work_struct *work)
 
        case BRCMF_BT_DHCP_OPPR_WIN:
                if (btci->dhcp_done) {
-                       brcmf_dbg(TRACE, "DHCP done before T1 expiration\n");
+                       brcmf_dbg(INFO, "DHCP done before T1 expiration\n");
                        goto idle;
                }
 
                /* DHCP is not over yet, start lowering BT priority */
-               brcmf_dbg(TRACE, "DHCP T1:%d expired\n",
+               brcmf_dbg(INFO, "DHCP T1:%d expired\n",
                          BRCMF_BTCOEX_OPPR_WIN_TIME);
                brcmf_btcoex_boost_wifi(btci, true);
 
@@ -339,9 +339,9 @@ static void brcmf_btcoex_handler(struct work_struct *work)
 
        case BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT:
                if (btci->dhcp_done)
-                       brcmf_dbg(TRACE, "DHCP done before T2 expiration\n");
+                       brcmf_dbg(INFO, "DHCP done before T2 expiration\n");
                else
-                       brcmf_dbg(TRACE, "DHCP T2:%d expired\n",
+                       brcmf_dbg(INFO, "DHCP T2:%d expired\n",
                                  BRCMF_BT_DHCP_FLAG_FORCE_TIMEOUT);
 
                goto idle;
@@ -440,13 +440,13 @@ static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci)
        /* Stop any bt timer because DHCP session is done */
        btci->dhcp_done = true;
        if (btci->timer_on) {
-               brcmf_dbg(TRACE, "disable BT DHCP Timer\n");
+               brcmf_dbg(INFO, "disable BT DHCP Timer\n");
                btci->timer_on = false;
                del_timer_sync(&btci->timer);
 
                /* schedule worker if transition to IDLE is needed */
                if (btci->bt_state != BRCMF_BT_DHCP_IDLE) {
-                       brcmf_dbg(TRACE, "bt_state:%d\n",
+                       brcmf_dbg(INFO, "bt_state:%d\n",
                                  btci->bt_state);
                        schedule_work(&btci->work);
                }
@@ -472,7 +472,7 @@ int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
 
        switch (mode) {
        case BRCMF_BTCOEX_DISABLED:
-               brcmf_dbg(TRACE, "DHCP session starts\n");
+               brcmf_dbg(INFO, "DHCP session starts\n");
                if (btci->bt_state != BRCMF_BT_DHCP_IDLE)
                        return -EBUSY;
                /* Start BT timer only for SCO connection */
@@ -484,14 +484,14 @@ int brcmf_btcoex_set_mode(struct brcmf_cfg80211_vif *vif,
                break;
 
        case BRCMF_BTCOEX_ENABLED:
-               brcmf_dbg(TRACE, "DHCP session ends\n");
+               brcmf_dbg(INFO, "DHCP session ends\n");
                if (btci->bt_state != BRCMF_BT_DHCP_IDLE &&
                    vif == btci->vif) {
                        brcmf_btcoex_dhcp_end(btci);
                }
                break;
        default:
-               brcmf_dbg(TRACE, "Unknown mode, ignored\n");
+               brcmf_dbg(INFO, "Unknown mode, ignored\n");
        }
        return 0;
 }
index 16f9ab2568a8089c1c38eff8f8998e8fc29ee330..a8998eb60d22166eef9c51f027c5b7d41d2ae020 100644 (file)
  */
 #define BRCMF_DRIVER_FIRMWARE_VERSION_LEN      32
 
-/* Bus independent dongle command */
-struct brcmf_dcmd {
-       uint cmd;               /* common dongle cmd definition */
-       void *buf;              /* pointer to user buffer */
-       uint len;               /* length of user buffer */
-       u8 set;                 /* get or set request (optional) */
-       uint used;              /* bytes read or written (optional) */
-       uint needed;            /* bytes needed (optional) */
-};
-
 /**
  * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
  *
index ed3e32ce8c23ee8fd032ad35520c7fee1e3cb18a..d991f8e3d9ece11e6882d4569e58311ec689c0a5 100644 (file)
@@ -282,6 +282,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
        ptr = strrchr(buf, ' ') + 1;
        strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
 
+       /* set mpc */
+       err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
+       if (err) {
+               brcmf_err("failed setting mpc\n");
+               goto done;
+       }
+
        /*
         * Setup timeout if Beacons are lost and roam is off to report
         * link down
index 09dd8c13d8448392372299c6eb8954132fca9c72..2699441d4f41220537ea13c268a88276fdcc8dbd 100644 (file)
@@ -808,7 +808,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
        } else {
                brcmf_dbg(INFO, "allocate netdev interface\n");
                /* Allocate netdev, including space for private structure */
-               ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
+               ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
+                                   ether_setup);
                if (!ndev)
                        return ERR_PTR(-ENOMEM);
 
index 59a5af5bf994d88b3e4f969c8e0633de3bd57d87..ded328f80cd1237454274ce53fd39433249b0e85 100644 (file)
@@ -54,7 +54,7 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
        if (err >= 0)
                err = 0;
        else
-               brcmf_err("Failed err=%d\n", err);
+               brcmf_dbg(FIL, "Failed err=%d\n", err);
 
        return err;
 }
index f3445ac627e48d84ef0391f59b5904beb9bf80f9..057b982ea8b37f90792fc53ca9337a9f13897e32 100644 (file)
@@ -708,7 +708,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans,
                active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
        else if (num_chans == AF_PEER_SEARCH_CNT)
                active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
-       else if (wl_get_vif_state_all(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
+       else if (brcmf_get_vif_state_any(p2p->cfg, BRCMF_VIF_STATUS_CONNECTED))
                active = -1;
        else
                active = P2PAPI_SCAN_DWELL_TIME_MS;
@@ -2364,7 +2364,6 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
                return 0;
        default:
                return -ENOTSUPP;
-               break;
        }
 
        clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
index d06fcb05adf2517a292ab727467e69d0abf28259..b732a99e402cb8f494d40cd91ed865d58ec27dc6 100644 (file)
 #include "usb_rdl.h"
 #include "usb.h"
 
-#define IOCTL_RESP_TIMEOUT  2000
+#define IOCTL_RESP_TIMEOUT             2000
 
 #define BRCMF_USB_RESET_GETVER_SPINWAIT        100     /* in unit of ms */
 #define BRCMF_USB_RESET_GETVER_LOOP_CNT        10
 
 #define BRCMF_POSTBOOT_ID              0xA123  /* ID to detect if dongle
                                                   has boot up */
-#define BRCMF_USB_NRXQ 50
-#define BRCMF_USB_NTXQ 50
+#define BRCMF_USB_NRXQ                 50
+#define BRCMF_USB_NTXQ                 50
 
-#define CONFIGDESC(usb)         (&((usb)->actconfig)->desc)
-#define IFPTR(usb, idx)         ((usb)->actconfig->interface[(idx)])
-#define IFALTS(usb, idx)        (IFPTR((usb), (idx))->altsetting[0])
-#define IFDESC(usb, idx)        IFALTS((usb), (idx)).desc
-#define IFEPDESC(usb, idx, ep)  (IFALTS((usb), (idx)).endpoint[(ep)]).desc
+#define BRCMF_USB_CBCTL_WRITE          0
+#define BRCMF_USB_CBCTL_READ           1
+#define BRCMF_USB_MAX_PKT_SIZE         1600
 
-#define CONTROL_IF              0
-#define BULK_IF                 0
-
-#define BRCMF_USB_CBCTL_WRITE  0
-#define BRCMF_USB_CBCTL_READ   1
-#define BRCMF_USB_MAX_PKT_SIZE 1600
-
-#define BRCMF_USB_43143_FW_NAME        "brcm/brcmfmac43143.bin"
-#define BRCMF_USB_43236_FW_NAME        "brcm/brcmfmac43236b.bin"
-#define BRCMF_USB_43242_FW_NAME        "brcm/brcmfmac43242a.bin"
+#define BRCMF_USB_43143_FW_NAME                "brcm/brcmfmac43143.bin"
+#define BRCMF_USB_43236_FW_NAME                "brcm/brcmfmac43236b.bin"
+#define BRCMF_USB_43242_FW_NAME                "brcm/brcmfmac43242a.bin"
+#define BRCMF_USB_43569_FW_NAME                "brcm/brcmfmac43569.bin"
 
 struct brcmf_usb_image {
        struct list_head list;
@@ -70,7 +62,7 @@ struct brcmf_usbdev_info {
        struct list_head rx_postq;
        struct list_head tx_freeq;
        struct list_head tx_postq;
-       uint rx_pipe, tx_pipe, rx_pipe2;
+       uint rx_pipe, tx_pipe;
 
        int rx_low_watermark;
        int tx_low_watermark;
@@ -97,6 +89,7 @@ struct brcmf_usbdev_info {
        int ctl_completed;
        wait_queue_head_t ioctl_resp_wait;
        ulong ctl_op;
+       u8 ifnum;
 
        struct urb *bulk_urb; /* used for FW download */
 };
@@ -576,7 +569,6 @@ fail:
 static int brcmf_usb_up(struct device *dev)
 {
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
-       u16 ifnum;
 
        brcmf_dbg(USB, "Enter\n");
        if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
@@ -589,21 +581,19 @@ static int brcmf_usb_up(struct device *dev)
                devinfo->ctl_in_pipe = usb_rcvctrlpipe(devinfo->usbdev, 0);
                devinfo->ctl_out_pipe = usb_sndctrlpipe(devinfo->usbdev, 0);
 
-               ifnum = IFDESC(devinfo->usbdev, CONTROL_IF).bInterfaceNumber;
-
                /* CTL Write */
                devinfo->ctl_write.bRequestType =
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
                devinfo->ctl_write.bRequest = 0;
                devinfo->ctl_write.wValue = cpu_to_le16(0);
-               devinfo->ctl_write.wIndex = cpu_to_le16p(&ifnum);
+               devinfo->ctl_write.wIndex = cpu_to_le16(devinfo->ifnum);
 
                /* CTL Read */
                devinfo->ctl_read.bRequestType =
                        USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
                devinfo->ctl_read.bRequest = 1;
                devinfo->ctl_read.wValue = cpu_to_le16(0);
-               devinfo->ctl_read.wIndex = cpu_to_le16p(&ifnum);
+               devinfo->ctl_read.wIndex = cpu_to_le16(devinfo->ifnum);
        }
        brcmf_usb_rx_fill_all(devinfo);
        return 0;
@@ -642,19 +632,19 @@ brcmf_usb_sync_complete(struct urb *urb)
        brcmf_usb_ioctl_resp_wake(devinfo);
 }
 
-static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
-                            void *buffer, int buflen)
+static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
+                           void *buffer, int buflen)
 {
-       int ret = 0;
+       int ret;
        char *tmpbuf;
        u16 size;
 
        if ((!devinfo) || (devinfo->ctl_urb == NULL))
-               return false;
+               return -EINVAL;
 
        tmpbuf = kmalloc(buflen, GFP_ATOMIC);
        if (!tmpbuf)
-               return false;
+               return -ENOMEM;
 
        size = buflen;
        devinfo->ctl_urb->transfer_buffer_length = size;
@@ -675,14 +665,16 @@ static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
        ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
        if (ret < 0) {
                brcmf_err("usb_submit_urb failed %d\n", ret);
-               kfree(tmpbuf);
-               return false;
+               goto finalize;
        }
 
-       ret = brcmf_usb_ioctl_resp_wait(devinfo);
-       memcpy(buffer, tmpbuf, buflen);
-       kfree(tmpbuf);
+       if (!brcmf_usb_ioctl_resp_wait(devinfo))
+               ret = -ETIMEDOUT;
+       else
+               memcpy(buffer, tmpbuf, buflen);
 
+finalize:
+       kfree(tmpbuf);
        return ret;
 }
 
@@ -724,6 +716,7 @@ brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
 {
        struct bootrom_id_le id;
        u32 loop_cnt;
+       int err;
 
        brcmf_dbg(USB, "Enter\n");
 
@@ -732,7 +725,9 @@ brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
                mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
                loop_cnt++;
                id.chip = cpu_to_le32(0xDEAD);       /* Get the ID */
-               brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
+               err = brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
+               if ((err) && (err != -ETIMEDOUT))
+                       return err;
                if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
                        break;
        } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
@@ -794,8 +789,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
        }
 
        /* 1) Prepare USB boot loader for runtime image */
-       brcmf_usb_dl_cmd(devinfo, DL_START, &state,
-                        sizeof(struct rdl_state_le));
+       brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
 
        rdlstate = le32_to_cpu(state.state);
        rdlbytes = le32_to_cpu(state.bytes);
@@ -839,10 +833,10 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
                        dlpos += sendlen;
                        sent += sendlen;
                }
-               if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
-                                     sizeof(struct rdl_state_le))) {
-                       brcmf_err("DL_GETSTATE Failed xxxx\n");
-                       err = -EINVAL;
+               err = brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
+                                      sizeof(state));
+               if (err) {
+                       brcmf_err("DL_GETSTATE Failed\n");
                        goto fail;
                }
 
@@ -898,13 +892,12 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
                return -EINVAL;
 
        /* Check we are runnable */
-       brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
-               sizeof(struct rdl_state_le));
+       state.state = 0;
+       brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, sizeof(state));
 
        /* Start the image */
        if (state.state == cpu_to_le32(DL_RUNNABLE)) {
-               if (!brcmf_usb_dl_cmd(devinfo, DL_GO, &state,
-                       sizeof(struct rdl_state_le)))
+               if (brcmf_usb_dl_cmd(devinfo, DL_GO, &state, sizeof(state)))
                        return -ENODEV;
                if (brcmf_usb_resetcfg(devinfo))
                        return -ENODEV;
@@ -928,6 +921,9 @@ static bool brcmf_usb_chip_support(int chipid, int chiprev)
                return (chiprev == 3);
        case 43242:
                return true;
+       case 43566:
+       case 43569:
+               return true;
        default:
                break;
        }
@@ -1028,6 +1024,9 @@ static const char *brcmf_usb_get_fwname(struct brcmf_usbdev_info *devinfo)
                return BRCMF_USB_43236_FW_NAME;
        case 43242:
                return BRCMF_USB_43242_FW_NAME;
+       case 43566:
+       case 43569:
+               return BRCMF_USB_43569_FW_NAME;
        default:
                return NULL;
        }
@@ -1222,15 +1221,15 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
 static int
 brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
-       int ep;
-       struct usb_endpoint_descriptor *endpoint;
-       int ret = 0;
        struct usb_device *usb = interface_to_usbdev(intf);
-       int num_of_eps;
-       u8 endpoint_num;
        struct brcmf_usbdev_info *devinfo;
+       struct usb_interface_descriptor *desc;
+       struct usb_endpoint_descriptor *endpoint;
+       int ret = 0;
+       u32 num_of_eps;
+       u8 endpoint_num, ep;
 
-       brcmf_dbg(USB, "Enter\n");
+       brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
 
        devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
        if (devinfo == NULL)
@@ -1238,92 +1237,71 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        devinfo->usbdev = usb;
        devinfo->dev = &usb->dev;
-
        usb_set_intfdata(intf, devinfo);
 
        /* Check that the device supports only one configuration */
        if (usb->descriptor.bNumConfigurations != 1) {
-               ret = -1;
-               goto fail;
-       }
-
-       if (usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
-               ret = -1;
+               brcmf_err("Number of configurations: %d not supported\n",
+                         usb->descriptor.bNumConfigurations);
+               ret = -ENODEV;
                goto fail;
        }
 
-       /*
-        * Only the BDC interface configuration is supported:
-        *      Device class: USB_CLASS_VENDOR_SPEC
-        *      if0 class: USB_CLASS_VENDOR_SPEC
-        *      if0/ep0: control
-        *      if0/ep1: bulk in
-        *      if0/ep2: bulk out (ok if swapped with bulk in)
-        */
-       if (CONFIGDESC(usb)->bNumInterfaces != 1) {
-               ret = -1;
+       if ((usb->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) &&
+           (usb->descriptor.bDeviceClass != USB_CLASS_MISC) &&
+           (usb->descriptor.bDeviceClass != USB_CLASS_WIRELESS_CONTROLLER)) {
+               brcmf_err("Device class: 0x%x not supported\n",
+                         usb->descriptor.bDeviceClass);
+               ret = -ENODEV;
                goto fail;
        }
 
-       /* Check interface */
-       if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
-           IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
-           IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) {
-               brcmf_err("invalid control interface: class %d, subclass %d, proto %d\n",
-                         IFDESC(usb, CONTROL_IF).bInterfaceClass,
-                         IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
-                         IFDESC(usb, CONTROL_IF).bInterfaceProtocol);
-               ret = -1;
+       desc = &intf->altsetting[0].desc;
+       if ((desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
+           (desc->bInterfaceSubClass != 2) ||
+           (desc->bInterfaceProtocol != 0xff)) {
+               brcmf_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n",
+                         desc->bInterfaceNumber, desc->bInterfaceClass,
+                         desc->bInterfaceSubClass, desc->bInterfaceProtocol);
+               ret = -ENODEV;
                goto fail;
        }
 
-       /* Check control endpoint */
-       endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
-       if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
-               != USB_ENDPOINT_XFER_INT) {
-               brcmf_err("invalid control endpoint %d\n",
-                         endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
-               ret = -1;
-               goto fail;
-       }
-
-       devinfo->rx_pipe = 0;
-       devinfo->rx_pipe2 = 0;
-       devinfo->tx_pipe = 0;
-       num_of_eps = IFDESC(usb, BULK_IF).bNumEndpoints - 1;
-
-       /* Check data endpoints and get pipes */
-       for (ep = 1; ep <= num_of_eps; ep++) {
-               endpoint = &IFEPDESC(usb, BULK_IF, ep);
-               if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
-                   USB_ENDPOINT_XFER_BULK) {
-                       brcmf_err("invalid data endpoint %d\n", ep);
-                       ret = -1;
-                       goto fail;
-               }
-
-               endpoint_num = endpoint->bEndpointAddress &
-                              USB_ENDPOINT_NUMBER_MASK;
-               if ((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
-                       == USB_DIR_IN) {
-                       if (!devinfo->rx_pipe) {
+       num_of_eps = desc->bNumEndpoints;
+       for (ep = 0; ep < num_of_eps; ep++) {
+               endpoint = &intf->altsetting[0].endpoint[ep].desc;
+               endpoint_num = usb_endpoint_num(endpoint);
+               if (!usb_endpoint_xfer_bulk(endpoint))
+                       continue;
+               if (usb_endpoint_dir_in(endpoint)) {
+                       if (!devinfo->rx_pipe)
                                devinfo->rx_pipe =
                                        usb_rcvbulkpipe(usb, endpoint_num);
-                       } else {
-                               devinfo->rx_pipe2 =
-                                       usb_rcvbulkpipe(usb, endpoint_num);
-                       }
                } else {
-                       devinfo->tx_pipe = usb_sndbulkpipe(usb, endpoint_num);
+                       if (!devinfo->tx_pipe)
+                               devinfo->tx_pipe =
+                                       usb_sndbulkpipe(usb, endpoint_num);
                }
        }
+       if (devinfo->rx_pipe == 0) {
+               brcmf_err("No RX (in) Bulk EP found\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+       if (devinfo->tx_pipe == 0) {
+               brcmf_err("No TX (out) Bulk EP found\n");
+               ret = -ENODEV;
+               goto fail;
+       }
+
+       devinfo->ifnum = desc->bInterfaceNumber;
 
        if (usb->speed == USB_SPEED_SUPER)
-               brcmf_dbg(USB, "Broadcom super speed USB wireless device detected\n");
+               brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
        else if (usb->speed == USB_SPEED_HIGH)
-               brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
+               brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
        else
-               brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
+               brcmf_dbg(USB, "Broadcom full speed USB WLAN interface detected\n");
 
        ret = brcmf_usb_probe_cb(devinfo);
        if (ret)
@@ -1333,11 +1311,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        return 0;
 
 fail:
-       brcmf_err("failed with errno %d\n", ret);
        kfree(devinfo);
        usb_set_intfdata(intf, NULL);
        return ret;
-
 }
 
 static void
@@ -1382,6 +1358,7 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
 {
        struct usb_device *usb = interface_to_usbdev(intf);
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
+
        brcmf_dbg(USB, "Enter\n");
 
        return brcmf_fw_get_firmwares(&usb->dev, 0,
@@ -1393,12 +1370,14 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
 #define BRCMF_USB_DEVICE_ID_43143      0xbd1e
 #define BRCMF_USB_DEVICE_ID_43236      0xbd17
 #define BRCMF_USB_DEVICE_ID_43242      0xbd1f
+#define BRCMF_USB_DEVICE_ID_43569      0xbd27
 #define BRCMF_USB_DEVICE_ID_BCMFW      0x0bdc
 
 static struct usb_device_id brcmf_usb_devid_table[] = {
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43143) },
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43236) },
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43242) },
+       { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_43569) },
        /* special entry for device with firmware loaded and running */
        { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
        { }
@@ -1408,6 +1387,7 @@ MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
 MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
 MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
 MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
+MODULE_FIRMWARE(BRCMF_USB_43569_FW_NAME);
 
 static struct usb_driver brcmf_usbdrvr = {
        .name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c
new file mode 100644 (file)
index 0000000..5960d82
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <brcmu_wifi.h>
+#include "fwil_types.h"
+#include "dhd.h"
+#include "p2p.h"
+#include "dhd_dbg.h"
+#include "wl_cfg80211.h"
+#include "vendor.h"
+#include "fwil.h"
+
+static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
+                                                struct wireless_dev *wdev,
+                                                const void *data, int len)
+{
+       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct net_device *ndev = cfg_to_ndev(cfg);
+       const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
+       struct sk_buff *reply;
+       int ret, payload, ret_len;
+       void *dcmd_buf = NULL, *wr_pointer;
+       u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+
+       brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set,
+                 cmdhdr->len);
+
+       len -= sizeof(struct brcmf_vndr_dcmd_hdr);
+       ret_len = cmdhdr->len;
+       if (ret_len > 0 || len > 0) {
+               if (len > BRCMF_DCMD_MAXLEN) {
+                       brcmf_err("oversize input buffer %d\n", len);
+                       len = BRCMF_DCMD_MAXLEN;
+               }
+               if (ret_len > BRCMF_DCMD_MAXLEN) {
+                       brcmf_err("oversize return buffer %d\n", ret_len);
+                       ret_len = BRCMF_DCMD_MAXLEN;
+               }
+               payload = max(ret_len, len) + 1;
+               dcmd_buf = vzalloc(payload);
+               if (NULL == dcmd_buf)
+                       return -ENOMEM;
+
+               memcpy(dcmd_buf, (void *)cmdhdr + cmdhdr->offset, len);
+               *(char *)(dcmd_buf + len)  = '\0';
+       }
+
+       if (cmdhdr->set)
+               ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), cmdhdr->cmd,
+                                            dcmd_buf, ret_len);
+       else
+               ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), cmdhdr->cmd,
+                                            dcmd_buf, ret_len);
+       if (ret != 0)
+               goto exit;
+
+       wr_pointer = dcmd_buf;
+       while (ret_len > 0) {
+               msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+               ret_len -= msglen;
+               payload = msglen + sizeof(msglen);
+               reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+               if (NULL == reply) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               if (nla_put(reply, BRCMF_NLATTR_DATA, msglen, wr_pointer) ||
+                   nla_put_u16(reply, BRCMF_NLATTR_LEN, msglen)) {
+                       kfree_skb(reply);
+                       ret = -ENOBUFS;
+                       break;
+               }
+
+               ret = cfg80211_vendor_cmd_reply(reply);
+               if (ret)
+                       break;
+
+               wr_pointer += msglen;
+       }
+
+exit:
+       vfree(dcmd_buf);
+
+       return ret;
+}
+
+const struct wiphy_vendor_command brcmf_vendor_cmds[] = {
+       {
+               {
+                       .vendor_id = BROADCOM_OUI,
+                       .subcmd = BRCMF_VNDR_CMDS_DCMD
+               },
+               .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+                        WIPHY_VENDOR_CMD_NEED_NETDEV,
+               .doit = brcmf_cfg80211_vndr_cmds_dcmd_handler
+       },
+};
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.h b/drivers/net/wireless/brcm80211/brcmfmac/vendor.h
new file mode 100644 (file)
index 0000000..061b7bf
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _vendor_h_
+#define _vendor_h_
+
+#define BROADCOM_OUI   0x001018
+
+enum brcmf_vndr_cmds {
+       BRCMF_VNDR_CMDS_UNSPEC,
+       BRCMF_VNDR_CMDS_DCMD,
+       BRCMF_VNDR_CMDS_LAST
+};
+
+/**
+ * enum brcmf_nlattrs - nl80211 message attributes
+ *
+ * @BRCMF_NLATTR_LEN: message body length
+ * @BRCMF_NLATTR_DATA: message body
+ */
+enum brcmf_nlattrs {
+       BRCMF_NLATTR_UNSPEC,
+
+       BRCMF_NLATTR_LEN,
+       BRCMF_NLATTR_DATA,
+
+       __BRCMF_NLATTR_AFTER_LAST,
+       BRCMF_NLATTR_MAX = __BRCMF_NLATTR_AFTER_LAST - 1
+};
+
+/**
+ * struct brcmf_vndr_dcmd_hdr - message header for cfg80211 vendor command dcmd
+ *                             support
+ *
+ * @cmd: common dongle cmd definition
+ * @len: length of expecting return buffer
+ * @offset: offset of data buffer
+ * @set: get or set request(optional)
+ * @magic: magic number for verification
+ */
+struct brcmf_vndr_dcmd_hdr {
+       uint cmd;
+       int len;
+       uint offset;
+       uint set;
+       uint magic;
+};
+
+extern const struct wiphy_vendor_command brcmf_vendor_cmds[];
+
+#endif /* _vendor_h_ */
index d8fa276e368b4fc038b62a941191b3d25393b8f6..9682cf213ec46394c14d759d84ffdfc4794eea3c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <net/cfg80211.h>
 #include <net/netlink.h>
 
@@ -33,6 +34,7 @@
 #include "btcoex.h"
 #include "wl_cfg80211.h"
 #include "fwil.h"
+#include "vendor.h"
 
 #define BRCMF_SCAN_IE_LEN_MAX          2048
 #define BRCMF_PNO_VERSION              2
@@ -588,6 +590,12 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
        }
 }
 
+static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc)
+{
+       if ((brcmf_get_chip_info(ifp) >> 4) == 0x4329)
+               brcmf_set_mpc(ifp, mpc);
+}
+
 void brcmf_set_mpc(struct brcmf_if *ifp, int mpc)
 {
        s32 err = 0;
@@ -641,7 +649,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
                        brcmf_err("Scan abort  failed\n");
        }
 
-       brcmf_set_mpc(ifp, 1);
+       brcmf_scan_config_mpc(ifp, 1);
 
        /*
         * e-scan can be initiated by scheduled scan
@@ -920,7 +928,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
                brcmf_err("error (%d)\n", err);
                return err;
        }
-       brcmf_set_mpc(ifp, 0);
+       brcmf_scan_config_mpc(ifp, 0);
        results = (struct brcmf_scan_results *)cfg->escan_info.escan_buf;
        results->version = 0;
        results->count = 0;
@@ -928,7 +936,7 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
 
        err = escan->run(cfg, ifp, request, WL_ESCAN_ACTION_START);
        if (err)
-               brcmf_set_mpc(ifp, 1);
+               brcmf_scan_config_mpc(ifp, 1);
        return err;
 }
 
@@ -1019,7 +1027,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
                        brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
                        goto scan_out;
                }
-               brcmf_set_mpc(ifp, 0);
+               brcmf_scan_config_mpc(ifp, 0);
                err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
                                             &sr->ssid_le, sizeof(sr->ssid_le));
                if (err) {
@@ -1029,7 +1037,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
                        else
                                brcmf_err("WLC_SCAN error (%d)\n", err);
 
-                       brcmf_set_mpc(ifp, 1);
+                       brcmf_scan_config_mpc(ifp, 1);
                        goto scan_out;
                }
        }
@@ -1331,7 +1339,6 @@ static s32
 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
-       s32 err = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
        if (!check_vif_up(ifp->vif))
@@ -1341,7 +1348,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
 
        brcmf_dbg(TRACE, "Exit\n");
 
-       return err;
+       return 0;
 }
 
 static s32 brcmf_set_wpa_version(struct net_device *ndev,
@@ -2388,7 +2395,6 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
        struct cfg80211_bss *bss;
        struct ieee80211_supported_band *band;
        struct brcmu_chan ch;
-       s32 err = 0;
        u16 channel;
        u32 freq;
        u16 notify_capability;
@@ -2438,7 +2444,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
 
        cfg80211_put_bss(wiphy, bss);
 
-       return err;
+       return 0;
 }
 
 static struct brcmf_bss_info_le *
@@ -2690,7 +2696,6 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 {
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
        s32 status;
-       s32 err = 0;
        struct brcmf_escan_result_le *escan_result_le;
        struct brcmf_bss_info_le *bss_info_le;
        struct brcmf_bss_info_le *bss = NULL;
@@ -2781,7 +2786,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
                                  status);
        }
 exit:
-       return err;
+       return 0;
 }
 
 static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
@@ -3260,35 +3265,6 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
        return 0;
 }
 
-#ifdef CONFIG_NL80211_TESTMODE
-static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
-                                  struct wireless_dev *wdev,
-                                  void *data, int len)
-{
-       struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-       struct net_device *ndev = cfg_to_ndev(cfg);
-       struct brcmf_dcmd *dcmd = data;
-       struct sk_buff *reply;
-       int ret;
-
-       brcmf_dbg(TRACE, "cmd %x set %d buf %p len %d\n", dcmd->cmd, dcmd->set,
-                 dcmd->buf, dcmd->len);
-
-       if (dcmd->set)
-               ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), dcmd->cmd,
-                                            dcmd->buf, dcmd->len);
-       else
-               ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), dcmd->cmd,
-                                            dcmd->buf, dcmd->len);
-       if (ret == 0) {
-               reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
-               nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
-               ret = cfg80211_testmode_reply(reply);
-       }
-       return ret;
-}
-#endif
-
 static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp)
 {
        s32 err;
@@ -3507,7 +3483,6 @@ static s32
 brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
                     struct parsed_vndr_ies *vndr_ies)
 {
-       s32 err = 0;
        struct brcmf_vs_tlv *vndrie;
        struct brcmf_tlv *ie;
        struct parsed_vndr_ie_info *parsed_info;
@@ -3560,7 +3535,7 @@ next:
                        ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len +
                                TLV_HDR_LEN);
        }
-       return err;
+       return 0;
 }
 
 static u32
@@ -4307,7 +4282,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        .crit_proto_start = brcmf_cfg80211_crit_proto_start,
        .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
        .tdls_oper = brcmf_cfg80211_tdls_oper,
-       CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
 };
 
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
@@ -4412,6 +4386,11 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
        brcmf_dbg(INFO, "Registering custom regulatory\n");
        wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
        wiphy_apply_custom_regulatory(wiphy, &brcmf_regdom);
+
+       /* vendor commands/events support */
+       wiphy->vendor_commands = brcmf_vendor_cmds;
+       wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1;
+
        err = wiphy_register(wiphy);
        if (err < 0) {
                brcmf_err("Could not register wiphy device (%d)\n", err);
@@ -4650,7 +4629,6 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
        struct brcmf_if *ifp = netdev_priv(ndev);
        struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
        struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
-       s32 err = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -4676,7 +4654,7 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
                          completed ? "succeeded" : "failed");
        }
        brcmf_dbg(TRACE, "Exit\n");
-       return err;
+       return 0;
 }
 
 static s32
@@ -4768,7 +4746,6 @@ brcmf_notify_roaming_status(struct brcmf_if *ifp,
                            const struct brcmf_event_msg *e, void *data)
 {
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
-       s32 err = 0;
        u32 event = e->event_code;
        u32 status = e->status;
 
@@ -4779,7 +4756,7 @@ brcmf_notify_roaming_status(struct brcmf_if *ifp,
                        brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
        }
 
-       return err;
+       return 0;
 }
 
 static s32
@@ -5057,6 +5034,9 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                        err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
                                                      BRCMF_OBSS_COEX_AUTO);
        }
+       /* clear for now and rely on update later */
+       wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.ht_supported = false;
+       wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap = 0;
 
        err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
        if (err) {
@@ -5625,16 +5605,15 @@ enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp)
        return wdev->iftype;
 }
 
-u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state)
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state)
 {
        struct brcmf_cfg80211_vif *vif;
-       bool result = 0;
 
        list_for_each_entry(vif, &cfg->vif_list, list) {
                if (test_bit(state, &vif->sme_state))
-                       result++;
+                       return true;
        }
-       return result;
+       return false;
 }
 
 static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
index 283c525a44f759d7995d882df93fdb700360abed..f9fb10998e7960d68ca03db0afbf2a7d84f780b2 100644 (file)
@@ -477,7 +477,7 @@ const struct brcmf_tlv *
 brcmf_parse_tlvs(const void *buf, int buflen, uint key);
 u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
                        struct ieee80211_channel *ch);
-u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
+bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, unsigned long state);
 void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
                                  struct brcmf_cfg80211_vif *vif);
 bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
index b0fd807f2b2b6bb18e8dcabb80164fdb6578d16d..57ecc05802e965546a47346e7b8b8914ab83c062 100644 (file)
@@ -1538,11 +1538,7 @@ static s8
 wlc_user_txpwr_antport_to_rfport(struct brcms_phy *pi, uint chan, u32 band,
                                 u8 rate)
 {
-       s8 offset = 0;
-
-       if (!pi->user_txpwr_at_rfport)
-               return offset;
-       return offset;
+       return 0;
 }
 
 void wlc_phy_txpower_recalc_target(struct brcms_phy *pi)
index 3e9f5b25be637d0891352be92cea018e8855bfb4..93869e89aa3dcf9794f56ea505d35d47c34e780c 100644 (file)
@@ -22916,7 +22916,6 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
                break;
        default:
                return;
-               break;
        }
 
        classif_state = wlc_phy_classifier_nphy(pi, 0, 0);
index e23d67e0bfe66806f1498e81fbcb82c4b3675a40..6f1b9aace8b340cea12555bb2dd1eaf777c54d96 100644 (file)
@@ -290,7 +290,6 @@ static int config_reg_write(struct cw1200_common *priv, u32 val)
        case HIF_8601_SILICON:
        default:
                return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
-               break;
        }
        return 0;
 }
index 9afcd4ce3368d37908a055df935f29cf4032f259..b2fb6c632092b202d484dcc6d1d73b2840ecf255 100644 (file)
@@ -53,9 +53,10 @@ static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
 
 int cw1200_hw_scan(struct ieee80211_hw *hw,
                   struct ieee80211_vif *vif,
-                  struct cfg80211_scan_request *req)
+                  struct ieee80211_scan_request *hw_req)
 {
        struct cw1200_common *priv = hw->priv;
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct wsm_template_frame frame = {
                .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
        };
index 5a8296ccfa82c962fd0f07ddfec80cfcee01bb14..cc75459e5784d9e8e28a677ba39195e36413e500 100644 (file)
@@ -41,7 +41,7 @@ struct cw1200_scan {
 
 int cw1200_hw_scan(struct ieee80211_hw *hw,
                   struct ieee80211_vif *vif,
-                  struct cfg80211_scan_request *req);
+                  struct ieee80211_scan_request *hw_req);
 void cw1200_scan_work(struct work_struct *work);
 void cw1200_scan_timeout(struct work_struct *work);
 void cw1200_clear_recent_scan_work(struct work_struct *work);
index cd0cad7f775993661af9e8f4577c89976b15b811..5b84664db13b3c0c36d27d2ef74d87cff618b2c7 100644 (file)
@@ -2289,7 +2289,6 @@ static int cw1200_upload_null(struct cw1200_common *priv)
 
 static int cw1200_upload_qosnull(struct cw1200_common *priv)
 {
-       int ret = 0;
        /* TODO:  This needs to be implemented
 
        struct wsm_template_frame frame = {
@@ -2306,7 +2305,7 @@ static int cw1200_upload_qosnull(struct cw1200_common *priv)
        dev_kfree_skb(frame.skb);
 
        */
-       return ret;
+       return 0;
 }
 
 static int cw1200_enable_beaconing(struct cw1200_common *priv,
index 3adb24021a282703a0966e79a61fa5f22af128fb..5f31b72a492181657f784408a3d33a11b418f221 100644 (file)
@@ -100,8 +100,7 @@ static inline void libipw_networks_free(struct libipw_device *ieee)
        int i;
 
        for (i = 0; i < MAX_NETWORK_COUNT; i++) {
-               if (ieee->networks[i]->ibss_dfs)
-                       kfree(ieee->networks[i]->ibss_dfs);
+               kfree(ieee->networks[i]->ibss_dfs);
                kfree(ieee->networks[i]);
        }
 }
index ecc674627e6e10b30a5a7b11ab3150c1ad42b37e..03de7467aecf7b63fef9ae007a15c13dbe5594f0 100644 (file)
@@ -1572,8 +1572,9 @@ il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
 
 int
 il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-              struct cfg80211_scan_request *req)
+              struct ieee80211_scan_request *hw_req)
 {
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct il_priv *il = hw->priv;
        int ret;
 
index ea5c0f863c4ee35b2cf6738569a5cb3253553c12..5b972798bdffb66d6acd242e80765fc574ce83af 100644 (file)
@@ -1787,7 +1787,7 @@ int il_scan_cancel(struct il_priv *il);
 int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
 void il_force_scan_end(struct il_priv *il);
 int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  struct cfg80211_scan_request *req);
+                  struct ieee80211_scan_request *hw_req);
 void il_internal_short_hw_scan(struct il_priv *il);
 int il_force_reset(struct il_priv *il, bool external);
 u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
index 29af7b51e3708788d02f4a1651205a348a5102dd..afb98f4fdaf3608afd3c9ab844ed34c8dff6851a 100644 (file)
@@ -1495,9 +1495,10 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
 
 static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
-                             struct cfg80211_scan_request *req)
+                             struct ieee80211_scan_request *hw_req)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+       struct cfg80211_scan_request *req = &hw_req->req;
        int ret;
 
        IWL_DEBUG_MAC80211(priv, "enter\n");
index f2c1439566b5ffa7814c02c041bf34bb3b3391d6..760c45c34ef36ea18a90c7762afac5c602796d3d 100644 (file)
 #include "commands.h"
 #include "power.h"
 
+static bool force_cam;
+module_param(force_cam, bool, 0644);
+MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
+
 /*
  * Setting power level allows the card to go to sleep when not busy.
  *
@@ -288,6 +292,11 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
        bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
        int dtimper;
 
+       if (force_cam) {
+               iwl_power_sleep_cam_cmd(priv, cmd);
+               return;
+       }
+
        dtimper = priv->hw->conf.ps_dtim_period ?: 1;
 
        if (priv->wowlan)
index 51c41531d81d7f5af8354aced5da7e6b4e647f78..51486cc9d943d21d79608980cf04e1fde494267f 100644 (file)
@@ -67,7 +67,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  8
+#define IWL8000_UCODE_API_MAX  9
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   8
@@ -119,10 +119,9 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file = DEFAULT_NVM_FILE_FAMILY_8000,
 };
 
-const struct iwl_cfg iwl8260_n_cfg = {
+const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .name = "Intel(R) Dual Band Wireless-AC 8260",
        .fw_name_pre = IWL8000_FW_PRE,
        IWL_DEVICE_8000,
index b7047905f41a38ce6a88f27b93ad75384306958a..034c2fc4b69ffcbb3505b8e96b283ae1593310ad 100644 (file)
@@ -337,7 +337,7 @@ extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
 extern const struct iwl_cfg iwl8260_2ac_cfg;
-extern const struct iwl_cfg iwl8260_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index f2a5c12269a3ed7de811399580c9ea908bc89173..77e3178040b2a80716ff97cb3c4771b356682897 100644 (file)
@@ -155,6 +155,8 @@ static struct iwlwifi_opmode_table {
        [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
 };
 
+#define IWL_DEFAULT_SCAN_CHANNELS 40
+
 /*
  * struct fw_sec: Just for the image parsing proccess.
  * For the fw storage we are using struct fw_desc.
@@ -565,6 +567,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        }
 
        drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+       memcpy(drv->fw.human_readable, ucode->human_readable,
+              sizeof(drv->fw.human_readable));
        build = le32_to_cpu(ucode->build);
 
        if (build)
@@ -819,6 +823,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
                                goto invalid_tlv_len;
                        break;
+               case IWL_UCODE_TLV_N_SCAN_CHANNELS:
+                       if (tlv_len != sizeof(u32))
+                               goto invalid_tlv_len;
+                       capa->n_scan_channels =
+                               le32_to_cpup((__le32 *)tlv_data);
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
@@ -973,6 +983,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
        fw->ucode_capa.standard_phy_calibration_size =
                        IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+       fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
 
        if (!api_ok)
                api_ok = api_max;
@@ -1394,3 +1405,7 @@ module_param_named(power_level, iwlwifi_mod_params.power_level,
                int, S_IRUGO);
 MODULE_PARM_DESC(power_level,
                 "default power save level (range from 1 - 5, default: 1)");
+
+module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, S_IRUGO);
+MODULE_PARM_DESC(fw_monitor,
+                "firmware monitor - to debug FW (default: false - needs lots of memory)");
index c44cf1149648860e4e8faa4630140e8bb76ae6b3..07ff7e0028ee81518f0b7b2db618787cd5d1882e 100644 (file)
@@ -779,7 +779,6 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
        if (cfg->ht_params->ht40_bands & BIT(band)) {
                ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
                ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
-               ht_info->mcs.rx_mask[4] = 0x01;
                max_bit_rate = MAX_BIT_RATE_40_MHZ;
        }
 
index 2953ffceda3881d29be37812544a374816b2130c..c39a0b899e83aef7b780786b01f90e29039d9259 100644 (file)
  * @IWL_FW_ERROR_DUMP_RXF:
  * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
  *     &struct iwl_fw_error_dump_txcmd packets
+ * @IWL_FW_ERROR_DUMP_DEV_FW_INFO:  struct %iwl_fw_error_dump_info
+ *     info on the device / firmware.
+ * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
  */
 enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_SRAM = 0,
        IWL_FW_ERROR_DUMP_REG = 1,
        IWL_FW_ERROR_DUMP_RXF = 2,
        IWL_FW_ERROR_DUMP_TXCMD = 3,
+       IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
+       IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -87,8 +92,8 @@ enum iwl_fw_error_dump_type {
 /**
  * struct iwl_fw_error_dump_data - data for one type
  * @type: %enum iwl_fw_error_dump_type
- * @len: the length starting from %data - must be a multiplier of 4.
- * @data: the data itself padded to be a multiplier of 4.
+ * @len: the length starting from %data
+ * @data: the data itself
  */
 struct iwl_fw_error_dump_data {
        __le32 type;
@@ -120,13 +125,50 @@ struct iwl_fw_error_dump_txcmd {
        u8 data[];
 } __packed;
 
+enum iwl_fw_error_dump_family {
+       IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
+       IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
+};
+
+/**
+ * struct iwl_fw_error_dump_info - info on the device / firmware
+ * @device_family: the family of the device (7 / 8)
+ * @hw_step: the step of the device
+ * @fw_human_readable: human readable FW version
+ * @dev_human_readable: name of the device
+ * @bus_human_readable: name of the bus used
+ */
+struct iwl_fw_error_dump_info {
+       __le32 device_family;
+       __le32 hw_step;
+       u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
+       u8 dev_human_readable[64];
+       u8 bus_human_readable[8];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_fw_mon - FW monitor data
+ * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
+ * @fw_mon_base_ptr: base pointer of the data
+ * @fw_mon_cycle_cnt: number of wrap arounds
+ * @reserved: for future use
+ * @data: captured data
+ */
+struct iwl_fw_error_dump_fw_mon {
+       __le32 fw_mon_wr_ptr;
+       __le32 fw_mon_base_ptr;
+       __le32 fw_mon_cycle_cnt;
+       __le32 reserved[3];
+       u8 data[];
+} __packed;
+
 /**
- * iwl_mvm_fw_error_next_data - advance fw error dump data pointer
+ * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
  * Returns: next data block
  */
 static inline struct iwl_fw_error_dump_data *
-iwl_mvm_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
 {
        return (void *)(data->data + le32_to_cpu(data->len));
 }
index b45e576a4b57feb01982edb99191a65a3ae38bca..929a8063354ca43e456b9f46fbc9d4e51c22a8a1 100644 (file)
@@ -128,6 +128,7 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_CSCHEME           = 28,
        IWL_UCODE_TLV_API_CHANGES_SET   = 29,
        IWL_UCODE_TLV_ENABLED_CAPABILITIES      = 30,
+       IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
 };
 
 struct iwl_ucode_tlv {
@@ -136,7 +137,8 @@ struct iwl_ucode_tlv {
        u8 data[0];
 };
 
-#define IWL_TLV_UCODE_MAGIC    0x0a4c5749
+#define IWL_TLV_UCODE_MAGIC            0x0a4c5749
+#define FW_VER_HUMAN_READABLE_SZ       64
 
 struct iwl_tlv_ucode_header {
        /*
@@ -147,7 +149,7 @@ struct iwl_tlv_ucode_header {
         */
        __le32 zero;
        __le32 magic;
-       u8 human_readable[64];
+       u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
        __le32 ver;             /* major/minor/API/serial */
        __le32 build;
        __le64 ignore;
index b1a33322b9bac9534e02a65c8e431d387f509542..1bb5193c5b1b5097c11bbf5a986f4c4d6e409c58 100644 (file)
@@ -65,6 +65,8 @@
 #include <linux/types.h>
 #include <net/mac80211.h>
 
+#include "iwl-fw-file.h"
+
 /**
  * enum iwl_ucode_tlv_flag - ucode API flags
  * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
@@ -118,11 +120,19 @@ enum iwl_ucode_tlv_flag {
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_BEACON: Support Extended beacon notification
+ * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
  * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA.
+ * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit.
+ * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
+       IWL_UCODE_TLV_CAPA_EXTENDED_BEACON      = BIT(1),
+       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
        IWL_UCODE_TLV_API_CSA_FLOW              = BIT(4),
+       IWL_UCODE_TLV_API_DISABLE_STA_TX        = BIT(5),
+       IWL_UCODE_TLV_API_LMAC_SCAN             = BIT(6),
 };
 
 /**
@@ -179,6 +189,7 @@ enum iwl_ucode_sec {
 
 struct iwl_ucode_capabilities {
        u32 max_probe_length;
+       u32 n_scan_channels;
        u32 standard_phy_calibration_size;
        u32 flags;
        u32 api[IWL_API_ARRAY_SIZE];
@@ -312,6 +323,7 @@ struct iwl_fw {
        bool mvm_fw;
 
        struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
+       u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
 };
 
 #endif  /* __iwl_fw_h__ */
index d051857729ab8e2f238115165c482d09033147db..f2d39cb011fc10442ce4dc6342602e5130b7c4d4 100644 (file)
@@ -103,6 +103,7 @@ enum iwl_disable_11n {
  * @power_level: power level, default = 1
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
+ * @fw_monitor: allow to use firmware monitor
  */
 struct iwl_mod_params {
        int sw_crypto;
@@ -120,6 +121,7 @@ struct iwl_mod_params {
        int ant_coupling;
        char *nvm_file;
        bool uapsd_disable;
+       bool fw_monitor;
 };
 
 #endif /* #__iwl_modparams_h__ */
index 85eee79c495c8f1080d9844a3f71ffad1b0d023d..018af2957d3b7b319c47222eae4d1dfe1cdc3891 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include "iwl-drv.h"
 #include "iwl-modparams.h"
 #include "iwl-nvm-parse.h"
@@ -87,8 +88,10 @@ enum wkp_nvm_offsets {
 
 enum family_8000_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
-       HW_ADDR0_FAMILY_8000 = 0x12,
-       HW_ADDR1_FAMILY_8000 = 0x16,
+       HW_ADDR0_WFPM_FAMILY_8000 = 0x12,
+       HW_ADDR1_WFPM_FAMILY_8000 = 0x16,
+       HW_ADDR0_PCIE_FAMILY_8000 = 0x8A,
+       HW_ADDR1_PCIE_FAMILY_8000 = 0x8E,
        MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
 
        /* NVM SW-Section offset (in words) definitions */
@@ -174,7 +177,9 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
  * @NVM_CHANNEL_IBSS: usable as an IBSS channel
  * @NVM_CHANNEL_ACTIVE: active scanning allowed
  * @NVM_CHANNEL_RADAR: radar detection required
- * @NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ *     on same channel on 2.4 or same UNII band on 5.2
  * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
  * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
  * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
@@ -185,7 +190,8 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_IBSS = BIT(1),
        NVM_CHANNEL_ACTIVE = BIT(3),
        NVM_CHANNEL_RADAR = BIT(4),
-       NVM_CHANNEL_DFS = BIT(7),
+       NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+       NVM_CHANNEL_GO_CONCURRENT = BIT(6),
        NVM_CHANNEL_WIDE = BIT(8),
        NVM_CHANNEL_40MHZ = BIT(9),
        NVM_CHANNEL_80MHZ = BIT(10),
@@ -273,6 +279,16 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                if (ch_flags & NVM_CHANNEL_RADAR)
                        channel->flags |= IEEE80211_CHAN_RADAR;
 
+               if (ch_flags & NVM_CHANNEL_INDOOR_ONLY)
+                       channel->flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+               /* Set the GO concurrent flag only in case that NO_IR is set.
+                * Otherwise it is meaningless
+                */
+               if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+                   (channel->flags & IEEE80211_CHAN_NO_IR))
+                       channel->flags |= IEEE80211_CHAN_GO_CONCURRENT;
+
                /* Initialize regulatory-based run-time data */
 
                /*
@@ -282,7 +298,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                channel->max_power = DEFAULT_MAX_TX_POWER;
                is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
                IWL_DEBUG_EEPROM(dev,
-                                "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+                                "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
                                 channel->hw_value,
                                 is_5ghz ? "5.2" : "2.4",
                                 CHECK_AND_PRINT_I(VALID),
@@ -290,7 +306,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                                 CHECK_AND_PRINT_I(ACTIVE),
                                 CHECK_AND_PRINT_I(RADAR),
                                 CHECK_AND_PRINT_I(WIDE),
-                                CHECK_AND_PRINT_I(DFS),
+                                CHECK_AND_PRINT_I(INDOOR_ONLY),
+                                CHECK_AND_PRINT_I(GO_CONCURRENT),
                                 ch_flags,
                                 channel->max_power,
                                 ((ch_flags & NVM_CHANNEL_IBSS) &&
@@ -462,7 +479,8 @@ static void iwl_set_hw_address(const struct iwl_cfg *cfg,
        data->hw_addr[5] = hw_addr[4];
 }
 
-static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
+static void iwl_set_hw_address_family_8000(struct device *dev,
+                                          const struct iwl_cfg *cfg,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
                                           const __le16 *nvm_hw)
@@ -481,20 +499,64 @@ static void iwl_set_hw_address_family_8000(const struct iwl_cfg *cfg,
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
 
-               if (is_valid_ether_addr(hw_addr))
+               if (is_valid_ether_addr(data->hw_addr))
                        return;
+
+               IWL_ERR_DEV(dev,
+                           "mac address from nvm override section is not valid\n");
        }
 
-       /* take the MAC address from the OTP */
-       hw_addr = (const u8 *)(nvm_hw + HW_ADDR0_FAMILY_8000);
-       data->hw_addr[0] = hw_addr[3];
-       data->hw_addr[1] = hw_addr[2];
-       data->hw_addr[2] = hw_addr[1];
-       data->hw_addr[3] = hw_addr[0];
+       if (nvm_hw) {
+               /* read the MAC address from OTP */
+               if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) {
+                       /* read the mac address from the WFPM location */
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR0_WFPM_FAMILY_8000);
+                       data->hw_addr[0] = hw_addr[3];
+                       data->hw_addr[1] = hw_addr[2];
+                       data->hw_addr[2] = hw_addr[1];
+                       data->hw_addr[3] = hw_addr[0];
+
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR1_WFPM_FAMILY_8000);
+                       data->hw_addr[4] = hw_addr[1];
+                       data->hw_addr[5] = hw_addr[0];
+               } else if ((data->nvm_version >= 0xE08) &&
+                          (data->nvm_version < 0xE0B)) {
+                       /* read "reverse order"  from the PCIe location */
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR0_PCIE_FAMILY_8000);
+                       data->hw_addr[5] = hw_addr[2];
+                       data->hw_addr[4] = hw_addr[1];
+                       data->hw_addr[3] = hw_addr[0];
+
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR1_PCIE_FAMILY_8000);
+                       data->hw_addr[2] = hw_addr[3];
+                       data->hw_addr[1] = hw_addr[2];
+                       data->hw_addr[0] = hw_addr[1];
+               } else {
+                       /* read from the PCIe location */
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR0_PCIE_FAMILY_8000);
+                       data->hw_addr[5] = hw_addr[0];
+                       data->hw_addr[4] = hw_addr[1];
+                       data->hw_addr[3] = hw_addr[2];
+
+                       hw_addr = (const u8 *)(nvm_hw +
+                                              HW_ADDR1_PCIE_FAMILY_8000);
+                       data->hw_addr[2] = hw_addr[1];
+                       data->hw_addr[1] = hw_addr[2];
+                       data->hw_addr[0] = hw_addr[3];
+               }
+               if (!is_valid_ether_addr(data->hw_addr))
+                       IWL_ERR_DEV(dev,
+                                   "mac address from hw section is not valid\n");
+
+               return;
+       }
 
-       hw_addr = (const u8 *)(nvm_hw + HW_ADDR1_FAMILY_8000);
-       data->hw_addr[4] = hw_addr[1];
-       data->hw_addr[5] = hw_addr[0];
+       IWL_ERR_DEV(dev, "mac address is not found\n");
 }
 
 struct iwl_nvm_data *
@@ -556,7 +618,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                                rx_chains);
        } else {
                /* MAC address in family 8000 */
-               iwl_set_hw_address_family_8000(cfg, data, mac_override, nvm_hw);
+               iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
+                                              nvm_hw);
 
                iwl_init_sbands(dev, cfg, data, regulatory,
                                sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
index 4997e27672b3ae22b4176df391a5bbb3ae9e1f4d..47033a35a40293f100312ede34bfc3a5b43c9efd 100644 (file)
@@ -359,4 +359,10 @@ enum secure_load_status_reg {
 #define RXF_LD_FENCE_OFFSET_ADDR       (0xa00c10)
 #define RXF_FIFO_RD_FENCE_ADDR         (0xa00c0c)
 
+/* FW monitor */
+#define MON_BUFF_BASE_ADDR             (0xa03c3c)
+#define MON_BUFF_END_ADDR              (0xa03c40)
+#define MON_BUFF_WRPTR                 (0xa03c44)
+#define MON_BUFF_CYCLE_CNT             (0xa03c48)
+
 #endif                         /* __iwl_prph_h__ */
index c30d7f64ec1e4e1c47a635e091c4e503c2ceaa83..a28235913c2cf94c5b07b9c0e3dc3c8768a7d990 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o
+iwlmvm-y += power.o coex.o coex_legacy.o
 iwlmvm-y += tt.o offloading.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
index c8c3b38228f02f9768b780a7bdd31273f49e9541..8110fe00bf5512635e5a050655c4d5ea7d011d68 100644 (file)
 #include "mvm.h"
 #include "iwl-debug.h"
 
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
-       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
-                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
-                      BT_COEX_PRIO_TBL_DISABLED, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
-       0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD    (-62)
-#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
 #define BT_ANTENNA_COUPLING_THRESHOLD          (30)
 
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
-                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
-                                   &iwl_bt_prio_tbl);
-}
-
 const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
        [BT_KILL_MSK_DEFAULT] = 0xffff0000,
        [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
@@ -519,6 +480,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
        struct ieee80211_chanctx_conf *chanctx_conf;
        enum iwl_bt_coex_lut_type ret;
        u16 phy_ctx_id;
+       u32 primary_ch_phy_id, secondary_ch_phy_id;
 
        /*
         * Checking that we hold mvm->mutex is a good idea, but the rate
@@ -535,7 +497,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
        if (!chanctx_conf ||
             chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
                rcu_read_unlock();
-               return BT_COEX_LOOSE_LUT;
+               return BT_COEX_INVALID_LUT;
        }
 
        ret = BT_COEX_TX_DIS_LUT;
@@ -546,10 +508,13 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
        }
 
        phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+       primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
+       secondary_ch_phy_id =
+               le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
 
-       if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+       if (primary_ch_phy_id == phy_ctx_id)
                ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
-       else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+       else if (secondary_ch_phy_id == phy_ctx_id)
                ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
        /* else - default = TX TX disallowed */
 
@@ -567,59 +532,63 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
        int ret;
-       u32 flags;
+       u32 mode;
 
-       ret = iwl_send_bt_prio_tbl(mvm);
-       if (ret)
-               return ret;
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_send_bt_init_conf_old(mvm);
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
                return -ENOMEM;
        cmd.data[0] = bt_cmd;
 
-       bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
-       bt_cmd->bt4_tx_rx_max_freq0 = 15;
-       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
-       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
-       flags = iwlwifi_mod_params.bt_coex_active ?
-                       BT_COEX_NW : BT_COEX_DISABLE;
-       bt_cmd->flags = cpu_to_le32(flags);
-
-       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
-                                           BT_VALID_BT_PRIO_BOOST |
-                                           BT_VALID_MAX_KILL |
-                                           BT_VALID_3W_TMRS |
-                                           BT_VALID_KILL_ACK |
-                                           BT_VALID_KILL_CTS |
-                                           BT_VALID_REDUCED_TX_POWER |
-                                           BT_VALID_LUT |
-                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
-                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
-                                           BT_VALID_ANT_ISOLATION |
-                                           BT_VALID_ANT_ISOLATION_THRS |
-                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
-                                           BT_VALID_TXRX_MAX_FREQ_0 |
-                                           BT_VALID_SYNC_TO_SCO);
+       lockdep_assert_held(&mvm->mutex);
 
-       if (IWL_MVM_BT_COEX_SYNC2SCO)
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+               u32 mode;
 
-       if (IWL_MVM_BT_COEX_CORUNNING) {
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-                                                    BT_VALID_CORUN_LUT_40);
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
+               switch (mvm->bt_force_ant_mode) {
+               case BT_FORCE_ANT_BT:
+                       mode = BT_COEX_BT;
+                       break;
+               case BT_FORCE_ANT_WIFI:
+                       mode = BT_COEX_WIFI;
+                       break;
+               default:
+                       WARN_ON(1);
+                       mode = 0;
+               }
+
+               bt_cmd->mode = cpu_to_le32(mode);
+               goto send_cmd;
        }
 
+       bt_cmd->max_kill = cpu_to_le32(5);
+       bt_cmd->bt4_antenna_isolation_thr =
+                               cpu_to_le32(BT_ANTENNA_COUPLING_THRESHOLD);
+       bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15);
+       bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15);
+       bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
+       bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT);
+
+       mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
+       bt_cmd->mode = cpu_to_le32(mode);
+
+       if (IWL_MVM_BT_COEX_SYNC2SCO)
+               bt_cmd->enabled_modules |=
+                       cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
+
+       if (IWL_MVM_BT_COEX_CORUNNING)
+               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+
        if (IWL_MVM_BT_COEX_MPLUT) {
-               bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+               bt_cmd->enabled_modules |=
+                       cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
        }
 
+       bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+
        if (mvm->cfg->bt_shared_single_ant)
                memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
                       sizeof(iwl_single_shared_ant));
@@ -627,21 +596,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
                       sizeof(iwl_combined_lookup));
 
-       /* Take first Co-running block LUT to get started */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
-
-       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+       memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost,
               sizeof(iwl_bt_prio_boost));
-       memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+       memcpy(&bt_cmd->multiprio_lut, iwl_bt_mprio_lut,
               sizeof(iwl_bt_mprio_lut));
-       bt_cmd->kill_ack_msk =
-               cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
-       bt_cmd->kill_cts_msk =
-               cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
 
+send_cmd:
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
@@ -651,19 +611,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        return ret;
 }
 
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
-                                          bool reduced_tx_power)
+static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm,
+                                     bool reduced_tx_power)
 {
        enum iwl_bt_kill_msk bt_kill_msk;
-       struct iwl_bt_coex_cmd *bt_cmd;
+       struct iwl_bt_coex_sw_boost_update_cmd cmd = {};
        struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .data[0] = &bt_cmd,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret = 0;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -693,40 +646,30 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
 
        mvm->bt_kill_msk = bt_kill_msk;
 
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+       cmd.boost_values[0].kill_ack_msk =
+               cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+       cmd.boost_values[0].kill_cts_msk =
+               cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
 
-       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
-       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_KILL_ACK |
-                                            BT_VALID_KILL_CTS);
+       cmd.boost_values[1].kill_ack_msk = cmd.boost_values[0].kill_ack_msk;
+       cmd.boost_values[1].kill_cts_msk = cmd.boost_values[0].kill_cts_msk;
+       cmd.boost_values[2].kill_ack_msk = cmd.boost_values[0].kill_ack_msk;
+       cmd.boost_values[2].kill_cts_msk = cmd.boost_values[0].kill_cts_msk;
 
        IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
                       iwl_bt_ack_kill_msk[bt_kill_msk],
                       iwl_bt_cts_kill_msk[bt_kill_msk]);
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
+       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0,
+                                   sizeof(cmd), &cmd);
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
                                       bool enable)
 {
-       struct iwl_bt_coex_cmd *bt_cmd;
-       /* Send ASYNC since this can be sent from an atomic context */
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_ASYNC,
-       };
+       struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
        struct iwl_mvm_sta *mvmsta;
+       u32 value;
        int ret;
 
        mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
@@ -737,27 +680,20 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        if (mvmsta->bt_reduced_txpower == enable)
                return 0;
 
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
-
-       bt_cmd->valid_bit_msk =
-               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
-       bt_cmd->bt_reduced_tx_power = sta_id;
+       value = mvmsta->sta_id;
 
        if (enable)
-               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+               value |= BT_REDUCED_TX_POWER_BIT;
 
        IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
                       enable ? "en" : "dis", sta_id);
 
+       cmd.reduced_txp = cpu_to_le32(value);
        mvmsta->bt_reduced_txpower = enable;
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
+                                  sizeof(cmd), &cmd);
 
-       kfree(bt_cmd);
        return ret;
 }
 
@@ -780,9 +716,9 @@ void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
 
        mvmvif->bf_data.last_bt_coex_event = rssi;
        mvmvif->bf_data.bt_coex_max_thold =
-               enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+               enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
        mvmvif->bf_data.bt_coex_min_thold =
-               enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+               enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
 }
 
 /* must be called under rcu_read_lock */
@@ -851,10 +787,13 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        if (!vif->bss_conf.assoc)
                smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
+       if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status,
+                              mvmvif->phy_ctxt->id))
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
        IWL_DEBUG_COEX(data->mvm,
-                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
-                      mvmvif->id, data->notif->bt_status, bt_activity_grading,
-                      smps_mode);
+                      "mac %d: bt_activity_grading %d smps_req %d\n",
+                      mvmvif->id, bt_activity_grading, smps_mode);
 
        iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
 
@@ -906,7 +845,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
         */
        if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
            mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
-           !data->notif->bt_status) {
+           le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
                data->reduced_tx_power = false;
                iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
                iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
@@ -919,7 +858,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        /* if the RSSI isn't valid, fake it is very low */
        if (!ave_rssi)
                ave_rssi = -100;
-       if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) {
+       if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
                if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
                        IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
 
@@ -930,7 +869,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                 * the iteration, if one interface's rssi isn't good enough,
                 * bt_kill_msk will be set to default values.
                 */
-       } else if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) {
+       } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
                if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
                        IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
 
@@ -955,6 +894,10 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
        struct iwl_bt_coex_ci_cmd cmd = {};
        u8 ci_bw_idx;
 
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return;
+
        rcu_read_lock();
        ieee80211_iterate_active_interfaces_atomic(
                                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -969,9 +912,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
                if (chan->def.width < NL80211_CHAN_WIDTH_40) {
                        ci_bw_idx = 0;
-                       cmd.co_run_bw_primary = 0;
                } else {
-                       cmd.co_run_bw_primary = 1;
                        if (chan->def.center_freq1 >
                            chan->def.chan->center_freq)
                                ci_bw_idx = 2;
@@ -981,7 +922,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
                cmd.bt_primary_ci =
                        iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+               cmd.primary_ch_phy_id =
+                       cpu_to_le32(*((u16 *)data.primary->drv_priv));
        }
 
        if (data.secondary) {
@@ -993,9 +935,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
                if (chan->def.width < NL80211_CHAN_WIDTH_40) {
                        ci_bw_idx = 0;
-                       cmd.co_run_bw_secondary = 0;
                } else {
-                       cmd.co_run_bw_secondary = 1;
                        if (chan->def.center_freq1 >
                            chan->def.chan->center_freq)
                                ci_bw_idx = 2;
@@ -1005,7 +945,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
 
                cmd.bt_secondary_ci =
                        iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+               cmd.secondary_ch_phy_id =
+                       cpu_to_le32(*((u16 *)data.secondary->drv_priv));
        }
 
        rcu_read_unlock();
@@ -1025,7 +966,7 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
         */
        data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
 
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+       if (iwl_mvm_bt_udpate_sw_boost(mvm, data.reduced_tx_power))
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
@@ -1036,11 +977,10 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
-                      notif->bt_status ? "ON" : "OFF");
-       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
        IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
                       le32_to_cpu(notif->primary_ch_lut));
@@ -1048,8 +988,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                       le32_to_cpu(notif->secondary_ch_lut));
        IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
                       le32_to_cpu(notif->bt_activity_grading));
-       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
-                      notif->bt_agg_traffic_load);
 
        /* remember this notification for future use: rssi fluctuations */
        memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -1119,8 +1057,17 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        };
        int ret;
 
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
+               return;
+       }
+
        lockdep_assert_held(&mvm->mutex);
 
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return;
+
        /*
         * Rssi update while not associated - can happen since the statistics
         * are handled asynchronously
@@ -1129,7 +1076,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return;
 
        /* No BT - reports should be disabled */
-       if (!mvm->last_bt_notif.bt_status)
+       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
                return;
 
        IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
@@ -1160,7 +1107,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         */
        data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
 
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+       if (iwl_mvm_bt_udpate_sw_boost(mvm, data.reduced_tx_power))
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
@@ -1171,15 +1118,23 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
+
+       if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
        if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
            BT_HIGH_TRAFFIC)
                return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
        lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
 
-       if (lut_type == BT_COEX_LOOSE_LUT)
+       if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
                return LINK_QUAL_AGG_TIME_LIMIT_DEF;
 
        /* tight coex, high bt traffic, reduce AGG time limit */
@@ -1190,18 +1145,37 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+       struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
+       enum iwl_bt_coex_lut_type lut_type;
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
+
+       if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
+               return true;
 
        if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
            BT_HIGH_TRAFFIC)
                return true;
 
        /*
-        * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
-        * already killed.
-        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
-        * Tx.
+        * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+        * since BT is already killed.
+        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+        * we Tx.
+        * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
         */
-       return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
+       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+       return lut_type != BT_COEX_LOOSE_LUT;
+}
+
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
+{
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
+
+       return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -1209,6 +1183,9 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 {
        u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
+
        if (band != IEEE80211_BAND_2GHZ)
                return false;
 
@@ -1249,6 +1226,11 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_bt_coex_vif_change_old(mvm);
+               return;
+       }
+
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
@@ -1258,22 +1240,22 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+       struct iwl_bt_coex_corun_lut_update_cmd cmd = {};
        u8 __maybe_unused lower_bound, upper_bound;
-       int ret;
        u8 lut;
 
-       struct iwl_bt_coex_cmd *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+               return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
        if (!IWL_MVM_BT_COEX_CORUNNING)
                return 0;
 
        lockdep_assert_held(&mvm->mutex);
 
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return 0;
+
        if (ant_isolation ==  mvm->last_ant_isol)
                return 0;
 
@@ -1298,25 +1280,13 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 
        mvm->last_corun_lut = lut;
 
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return 0;
-       cmd.data[0] = bt_cmd;
-
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_CORUN_LUT_20 |
-                                            BT_VALID_CORUN_LUT_40);
-
        /* For the moment, use the same LUT for 20GHz and 40GHz */
-       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut20));
-
-       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
-              sizeof(bt_cmd->bt4_corun_lut40));
+       memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20,
+              sizeof(cmd.corun_lut20));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
+              sizeof(cmd.corun_lut40));
 
-       kfree(bt_cmd);
-       return ret;
+       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+                                   sizeof(cmd), &cmd);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
new file mode 100644 (file)
index 0000000..ce50363
--- /dev/null
@@ -0,0 +1,1332 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "fw-api-coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
+#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
+       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
+                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
+
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
+                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
+                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
+                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
+                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
+                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
+                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
+                      BT_COEX_PRIO_TBL_DISABLED, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
+       0, 0, 0, 0, 0, 0,
+};
+
+#undef EVENT_PRIO_ANT
+
+#define BT_ANTENNA_COUPLING_THRESHOLD          (30)
+
+static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+{
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return 0;
+
+       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
+                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
+                                   &iwl_bt_prio_tbl);
+}
+
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+       cpu_to_le32(0xf0f0f0f0), /* 50% */
+       cpu_to_le32(0xc0c0c0c0), /* 25% */
+       cpu_to_le32(0xfcfcfcfc), /* 75% */
+       cpu_to_le32(0xfefefefe), /* 87.5% */
+};
+
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+};
+
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               /* Tight */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaeaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0x00004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Loose */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Tx Tx disabled */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xeeaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+};
+
+/* 20MHz / 40MHz below / 40Mhz above*/
+static const __le64 iwl_ci_mask[][3] = {
+       /* dummy entry for channel 0 */
+       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+       {
+               cpu_to_le64(0x0000001FFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000000FFFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000003FFFCULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+       },
+       {
+               cpu_to_le64(0x00001FFFE0ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+       },
+       {
+               cpu_to_le64(0x00007FFF80ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+       },
+       {
+               cpu_to_le64(0x0003FFFC00ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+       },
+       {
+               cpu_to_le64(0x000FFFF000ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+       },
+       {
+               cpu_to_le64(0x007FFF8000ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+       },
+       {
+               cpu_to_le64(0x01FFFE0000ULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+       },
+       {
+               cpu_to_le64(0x0FFFF00000ULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+               cpu_to_le64(0x0ULL),
+       },
+       {
+               cpu_to_le64(0x3FFFC00000ULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFFE000000ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFF8000000ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFC0000000ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0ULL)
+       },
+};
+
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+       cpu_to_le32(0x28412201),
+       cpu_to_le32(0x11118451),
+};
+
+struct corunning_block_luts {
+       u8 range;
+       __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
+};
+
+/*
+ * Ranges for the antenna coupling calibration / co-running block LUT:
+ *             LUT0: [ 0, 12[
+ *             LUT1: [12, 20[
+ *             LUT2: [20, 21[
+ *             LUT3: [21, 23[
+ *             LUT4: [23, 27[
+ *             LUT5: [27, 30[
+ *             LUT6: [30, 32[
+ *             LUT7: [32, 33[
+ *             LUT8: [33, - [
+ *
+ * NOTE: in every entry only the first LUT word differs (it carries the
+ * LUT index 0..8); all remaining words are zero for all ranges.
+ */
+static const struct corunning_block_luts antenna_coupling_ranges[] = {
+       {
+               .range = 0,
+               .lut20 = {
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 12,
+               .lut20 = {
+                       cpu_to_le32(0x00000001),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 20,
+               .lut20 = {
+                       cpu_to_le32(0x00000002),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 21,
+               .lut20 = {
+                       cpu_to_le32(0x00000003),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 23,
+               .lut20 = {
+                       cpu_to_le32(0x00000004),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 27,
+               .lut20 = {
+                       cpu_to_le32(0x00000005),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 30,
+               .lut20 = {
+                       cpu_to_le32(0x00000006),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 32,
+               .lut20 = {
+                       cpu_to_le32(0x00000007),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 33,
+               .lut20 = {
+                       cpu_to_le32(0x00000008),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+};
+
+/*
+ * iwl_get_coex_type - resolve the BT coex LUT type for a vif's channel
+ *
+ * Returns BT_COEX_INVALID_LUT when the vif has no channel context or the
+ * channel is not on 2.4GHz, BT_COEX_TX_DIS_LUT for shared single-antenna
+ * devices, otherwise the primary/secondary channel LUT from the last BT
+ * notification when this vif's PHY context id matches the cached CI
+ * command; defaults to TX-disallowed when neither matches.
+ */
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum iwl_bt_coex_lut_type ret;
+       u16 phy_ctx_id;
+
+       /*
+        * Checking that we hold mvm->mutex is a good idea, but the rate
+        * control can't acquire the mutex since it runs in Tx path.
+        * So this is racy in that case, but in the worst case, the AMPDU
+        * size limit will be wrong for a short time which is not a big
+        * issue.
+        */
+
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       if (!chanctx_conf ||
+           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return BT_COEX_INVALID_LUT;
+       }
+
+       ret = BT_COEX_TX_DIS_LUT;
+
+       if (mvm->cfg->bt_shared_single_ant) {
+               rcu_read_unlock();
+               return ret;
+       }
+
+       /* PHY context id lives at the start of the chanctx drv_priv */
+       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+       if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
+       else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
+       /* else - default = TX TX disallowed */
+
+       rcu_read_unlock();
+
+       return ret;
+}
+
+/*
+ * iwl_send_bt_init_conf_old - send the initial BT_CONFIG (old coex API)
+ *
+ * Sends the BT priority table, then builds and sends the full coex
+ * configuration: enable flags, valid-bit mask, decision LUT (single- vs
+ * dual-antenna), co-running LUTs (seeded with the first antenna-coupling
+ * range), priority boost, multi-priority LUT and the default ACK/CTS kill
+ * masks.  When a force antenna mode is active, only the forced mode flags
+ * are sent.  The cached BT notification and CI command are reset in all
+ * cases before sending.
+ *
+ * Must be called with mvm->mutex held.  Returns 0 or a negative errno.
+ */
+int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
+{
+       struct iwl_bt_coex_cmd_old *bt_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       int ret;
+       u32 flags;
+
+       ret = iwl_send_bt_prio_tbl(mvm);
+       if (ret)
+               return ret;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Force mode overrides everything: send only the forced flags */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+               switch (mvm->bt_force_ant_mode) {
+               case BT_FORCE_ANT_AUTO:
+                       flags = BT_COEX_AUTO_OLD;
+                       break;
+               case BT_FORCE_ANT_BT:
+                       flags = BT_COEX_BT_OLD;
+                       break;
+               case BT_FORCE_ANT_WIFI:
+                       flags = BT_COEX_WIFI_OLD;
+                       break;
+               default:
+                       WARN_ON(1);
+                       flags = 0;
+               }
+
+               bt_cmd->flags = cpu_to_le32(flags);
+               bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
+               goto send_cmd;
+       }
+
+       bt_cmd->max_kill = 5;
+       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+       bt_cmd->bt4_tx_rx_max_freq0 = 15;
+       bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+       bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
+
+       flags = iwlwifi_mod_params.bt_coex_active ?
+                       BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
+       bt_cmd->flags = cpu_to_le32(flags);
+
+       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
+                                           BT_VALID_BT_PRIO_BOOST |
+                                           BT_VALID_MAX_KILL |
+                                           BT_VALID_3W_TMRS |
+                                           BT_VALID_KILL_ACK |
+                                           BT_VALID_KILL_CTS |
+                                           BT_VALID_REDUCED_TX_POWER |
+                                           BT_VALID_LUT |
+                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+                                           BT_VALID_ANT_ISOLATION |
+                                           BT_VALID_ANT_ISOLATION_THRS |
+                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
+                                           BT_VALID_TXRX_MAX_FREQ_0 |
+                                           BT_VALID_SYNC_TO_SCO);
+
+       if (IWL_MVM_BT_COEX_SYNC2SCO)
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
+       if (IWL_MVM_BT_COEX_CORUNNING) {
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
+       }
+
+       if (IWL_MVM_BT_COEX_MPLUT) {
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+       }
+
+       /* Decision LUT depends on the antenna configuration */
+       if (mvm->cfg->bt_shared_single_ant)
+               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+                      sizeof(iwl_single_shared_ant));
+       else
+               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+                      sizeof(iwl_combined_lookup));
+
+       /* Take first Co-running block LUT to get started */
+       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
+              sizeof(bt_cmd->bt4_corun_lut20));
+       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
+              sizeof(bt_cmd->bt4_corun_lut40));
+
+       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+              sizeof(iwl_bt_prio_boost));
+       memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+              sizeof(iwl_bt_mprio_lut));
+       bt_cmd->kill_ack_msk =
+               cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
+       bt_cmd->kill_cts_msk =
+               cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
+
+send_cmd:
+       /* Invalidate the cached state before (re)configuring the fw */
+       memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
+       memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+/*
+ * iwl_mvm_bt_udpate_ctrl_kill_msk - update the ACK/CTS kill masks in fw
+ * @mvm: the mvm instance
+ * @reduced_tx_power: true when all BSS/P2P-client stations qualify for
+ *     reduced Tx power; takes precedence over the BT profile type
+ *
+ * Picks the kill mask from the last BT notification (an active SCO/A2DP/
+ * SNIFF profile gives BT higher priority) and sends a BT_CONFIG command
+ * only when the mask actually changed.  Returns 0 on success or when no
+ * update is needed, a negative errno otherwise.
+ *
+ * Must be called with mvm->mutex held.
+ */
+static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
+                                          bool reduced_tx_power)
+{
+       enum iwl_bt_kill_msk bt_kill_msk;
+       struct iwl_bt_coex_cmd_old *bt_cmd;
+       struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
+       /*
+        * cmd.data[0] is assigned only after bt_cmd is allocated below.
+        * Initializing it here with &bt_cmd would store the address of the
+        * local pointer variable itself, not the command payload.
+        */
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       int ret = 0;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (reduced_tx_power) {
+               /* Reduced Tx power has precedence on the type of the profile */
+               bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
+       } else {
+               /* Low latency BT profile is active: give higher prio to BT */
+               if (BT_MBOX_MSG(notif, 3, SCO_STATE)  ||
+                   BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
+                   BT_MBOX_MSG(notif, 3, SNIFF_STATE))
+                       bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
+               else
+                       bt_kill_msk = BT_KILL_MSK_DEFAULT;
+       }
+
+       IWL_DEBUG_COEX(mvm,
+                      "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
+                      bt_kill_msk,
+                      BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
+                      BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
+                      BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
+
+       /* Don't send HCMD if there is no update */
+       if (bt_kill_msk == mvm->bt_kill_msk)
+               return 0;
+
+       mvm->bt_kill_msk = bt_kill_msk;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
+
+       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+                                            BT_VALID_KILL_ACK |
+                                            BT_VALID_KILL_CTS);
+
+       IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+                      iwl_bt_ack_kill_msk[bt_kill_msk],
+                      iwl_bt_cts_kill_msk[bt_kill_msk]);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+/*
+ * iwl_mvm_bt_coex_reduced_txp - enable/disable reduced Tx power for a sta
+ *
+ * Sends an ASYNC BT_CONFIG update carrying the station id with
+ * BT_REDUCED_TX_POWER_BIT set when enabling.  Uses GFP_ATOMIC and
+ * CMD_ASYNC because it can be called from atomic context.  Returns 0
+ * without sending anything when the station is absent or already in the
+ * requested state.
+ */
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+                                      bool enable)
+{
+       struct iwl_bt_coex_cmd_old *bt_cmd;
+       /* Send ASYNC since this can be sent from an atomic context */
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_ASYNC,
+       };
+       struct iwl_mvm_sta *mvmsta;
+       int ret;
+
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+       if (!mvmsta)
+               return 0;
+
+       /* nothing to do */
+       if (mvmsta->bt_reduced_txpower == enable)
+               return 0;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
+
+       bt_cmd->valid_bit_msk =
+               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
+       bt_cmd->bt_reduced_tx_power = sta_id;
+
+       if (enable)
+               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+
+       IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
+                      enable ? "en" : "dis", sta_id);
+
+       mvmsta->bt_reduced_txpower = enable;
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+/* Accumulator passed to the vif iterators below */
+struct iwl_bt_iterator_data {
+       struct iwl_bt_coex_profile_notif_old *notif;    /* last BT notif */
+       struct iwl_mvm *mvm;
+       u32 num_bss_ifaces;     /* number of BSS/P2P-client vifs visited */
+       bool reduced_tx_power;  /* all visited vifs allow reduced Tx power */
+       struct ieee80211_chanctx_conf *primary;         /* elected primary */
+       struct ieee80211_chanctx_conf *secondary;       /* elected secondary */
+       bool primary_ll;        /* primary belongs to a low-latency vif */
+};
+
+/*
+ * Arm (enable=true) or disarm (enable=false, thresholds zeroed) the RSSI
+ * thresholds that drive reduced-Tx-power decisions for this vif.  The
+ * thresholds are stored negated, and @rssi is remembered as the last
+ * coex-related RSSI event.
+ */
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      bool enable, int rssi)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mvmvif->bf_data.last_bt_coex_event = rssi;
+       mvmvif->bf_data.bt_coex_max_thold =
+               enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
+       mvmvif->bf_data.bt_coex_min_thold =
+               enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
+}
+
+/*
+ * iwl_mvm_bt_notif_iterator - per-vif BT coex policy update
+ *
+ * For each station/AP vif: picks an SMPS mode from the BT activity
+ * grading, elects primary/secondary channel contexts (a low-latency vif
+ * always wins primary), and for associated 2.4GHz clients decides whether
+ * to request reduced Tx power from the average beacon RSSI.
+ *
+ * must be called under rcu_read_lock
+ */
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_bt_iterator_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum ieee80211_smps_mode smps_mode;
+       u32 bt_activity_grading;
+       int ave_rssi;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               /* Count BSSes vifs */
+               data->num_bss_ifaces++;
+               /* default smps_mode for BSS / P2P client is AUTOMATIC */
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+               break;
+       case NL80211_IFTYPE_AP:
+               /* default smps_mode for AP / GO is OFF */
+               smps_mode = IEEE80211_SMPS_OFF;
+               if (!mvmvif->ap_ibss_active) {
+                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                           smps_mode);
+                       return;
+               }
+
+               /* the Ack / Cts kill mask must be default if AP / GO */
+               data->reduced_tx_power = false;
+               break;
+       default:
+               return;
+       }
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       /* If channel context is invalid or not on 2.4GHz .. */
+       if ((!chanctx_conf ||
+            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+               /* ... relax constraints and disable rssi events */
+               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                   smps_mode);
+               data->reduced_tx_power = false;
+               if (vif->type == NL80211_IFTYPE_STATION) {
+                       iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+                                                   false);
+                       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               }
+               return;
+       }
+
+       /* Tighten SMPS as BT gets busier on 2.4GHz */
+       bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+       if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+               smps_mode = IEEE80211_SMPS_STATIC;
+       else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+               smps_mode = vif->type == NL80211_IFTYPE_AP ?
+                               IEEE80211_SMPS_OFF :
+                               IEEE80211_SMPS_DYNAMIC;
+
+       /* relax SMPS contraints for next association */
+       if (!vif->bss_conf.assoc)
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+       IWL_DEBUG_COEX(data->mvm,
+                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+                      mvmvif->id, data->notif->bt_status, bt_activity_grading,
+                      smps_mode);
+
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+
+       /* low latency is always primary */
+       if (iwl_mvm_vif_low_latency(mvmvif)) {
+               data->primary_ll = true;
+
+               data->secondary = data->primary;
+               data->primary = chanctx_conf;
+       }
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               if (!mvmvif->ap_ibss_active)
+                       return;
+
+               if (chanctx_conf == data->primary)
+                       return;
+
+               if (!data->primary_ll) {
+                       /*
+                        * downgrade the current primary no matter what its
+                        * type is.
+                        */
+                       data->secondary = data->primary;
+                       data->primary = chanctx_conf;
+               } else {
+                       /* there is low latency vif - we will be secondary */
+                       data->secondary = chanctx_conf;
+               }
+               return;
+       }
+
+       /*
+        * STA / P2P Client, try to be primary if first vif. If we are in low
+        * latency mode, we are already in primary and just don't do much
+        */
+       if (!data->primary || data->primary == chanctx_conf)
+               data->primary = chanctx_conf;
+       else if (!data->secondary)
+               /* if secondary is not NULL, it might be a GO */
+               data->secondary = chanctx_conf;
+
+       /*
+        * don't reduce the Tx power if one of these is true:
+        *  we are in LOOSE
+        *  single share antenna product
+        *  BT is active
+        *  we are associated
+        */
+       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+           mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+           !data->notif->bt_status) {
+               data->reduced_tx_power = false;
+               iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               return;
+       }
+
+       /* try to get the avg rssi from fw */
+       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+
+       /* if the RSSI isn't valid, fake it is very low */
+       if (!ave_rssi)
+               ave_rssi = -100;
+       if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
+               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
+                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+
+               /*
+                * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
+                * BSS / P2P clients have rssi above threshold.
+                * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
+                * the iteration, if one interface's rssi isn't good enough,
+                * bt_kill_msk will be set to default values.
+                */
+       } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
+               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
+                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+
+               /*
+                * One interface hasn't rssi above threshold, bt_kill_msk must
+                * be set to default values.
+                */
+               data->reduced_tx_power = false;
+       }
+
+       /* Begin to monitor the RSSI: it may influence the reduced Tx power */
+       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+}
+
+/*
+ * iwl_mvm_bt_coex_notif_handle - react to the cached BT profile notification
+ *
+ * Iterates the active interfaces to elect primary/secondary channel
+ * contexts and per-vif policies, builds the matching BT_COEX_CI
+ * channel-inhibition command (sent only when it differs from the last one
+ * sent), then updates the ACK/CTS kill masks.  Does nothing while a force
+ * antenna mode is active.
+ */
+static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
+{
+       struct iwl_bt_iterator_data data = {
+               .mvm = mvm,
+               .notif = &mvm->last_bt_notif_old,
+               /* assume reduced txp everywhere; iterator clears it */
+               .reduced_tx_power = true,
+       };
+       struct iwl_bt_coex_ci_cmd_old cmd = {};
+       /* 0: <40MHz; 1: 40MHz, center below control freq; 2: center above */
+       u8 ci_bw_idx;
+
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return;
+
+       rcu_read_lock();
+       ieee80211_iterate_active_interfaces_atomic(
+                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_bt_notif_iterator, &data);
+
+       if (data.primary) {
+               struct ieee80211_chanctx_conf *chan = data.primary;
+
+               if (WARN_ON(!chan->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_primary = 0;
+               } else {
+                       cmd.co_run_bw_primary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_primary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+       }
+
+       if (data.secondary) {
+               struct ieee80211_chanctx_conf *chan = data.secondary;
+
+               if (WARN_ON(!data.secondary->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_secondary = 0;
+               } else {
+                       cmd.co_run_bw_secondary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_secondary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+       }
+
+       rcu_read_unlock();
+
+       /* Don't spam the fw with the same command over and over */
+       if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
+               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
+                                        sizeof(cmd), &cmd))
+                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+               memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
+       }
+
+       /*
+        * If there are no BSS / P2P client interfaces, reduced Tx Power is
+        * irrelevant since it is based on the RSSI coming from the beacon.
+        * Use BT_KILL_MSK_DEFAULT in that case.
+        */
+       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
+
+       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
+int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb,
+                                struct iwl_device_cmd *dev_cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
+
+       IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+                      notif->bt_status ? "ON" : "OFF");
+       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
+       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+                      le32_to_cpu(notif->primary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+                      le32_to_cpu(notif->secondary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+                      le32_to_cpu(notif->bt_activity_grading));
+       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
+                      notif->bt_agg_traffic_load);
+
+       /* remember this notification for future use: rssi fluctuations */
+       memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
+
+       iwl_mvm_bt_coex_notif_handle(mvm);
+
+       /*
+        * This is an async handler for a notification, returning anything other
+        * than 0 doesn't make sense even if HCMD failed.
+        */
+       return 0;
+}
+
+static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
+                                    struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+       struct iwl_bt_iterator_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       /* If channel context is invalid or not on 2.4GHz - don't count it */
+       if (!chanctx_conf ||
+           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       if (vif->type != NL80211_IFTYPE_STATION ||
+           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+
+       /* This can happen if the station has been removed right now */
+       if (IS_ERR_OR_NULL(sta))
+               return;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       data->num_bss_ifaces++;
+
+       /*
+        * If this interface doesn't support reduced Tx power (likely
+        * because of low RSSI), set bt_kill_msk to the default values.
+        */
+       if (!mvmsta->bt_reduced_txpower)
+               data->reduced_tx_power = false;
+       /* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
+}
+
+void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                              enum ieee80211_rssi_event rssi_event)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+       struct iwl_bt_iterator_data data = {
+               .mvm = mvm,
+               .reduced_tx_power = true,
+       };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return;
+
+       /*
+        * Rssi update while not associated - can happen since the statistics
+        * are handled asynchronously
+        */
+       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       /* No BT - reports should be disabled */
+       if (!mvm->last_bt_notif_old.bt_status)
+               return;
+
+       IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+                      rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+
+       /*
+        * Check if rssi is good enough for reduced Tx power, but not in loose
+        * scheme.
+        */
+       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
+               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+                                                 false);
+       else
+               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
+
+       if (ret)
+               IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_bt_rssi_iterator, &data);
+
+       /*
+        * If there are no BSS / P2P client interfaces, reduced Tx Power is
+        * irrelevant since it is based on the RSSI coming from the beacon.
+        * Use BT_KILL_MSK_DEFAULT in that case.
+        */
+       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
+
+       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
+
+u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
+                                   struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       enum iwl_bt_coex_lut_type lut_type;
+
+       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
+           BT_HIGH_TRAFFIC)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       if (mvm->last_bt_notif_old.ttc_enabled)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+       if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       /* tight coex, high bt traffic, reduce AGG time limit */
+       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
+                                        struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       enum iwl_bt_coex_lut_type lut_type;
+
+       if (mvm->last_bt_notif_old.ttc_enabled)
+               return true;
+
+       if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
+           BT_HIGH_TRAFFIC)
+               return true;
+
+       /*
+        * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+        * since BT is already killed.
+        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+        * we Tx.
+        * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
+        */
+       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+       return lut_type != BT_COEX_LOOSE_LUT;
+}
+
+bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
+{
+       u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
+       return ag == BT_OFF;
+}
+
+bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
+                                       enum ieee80211_band band)
+{
+       u32 bt_activity =
+               le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
+
+       if (band != IEEE80211_BAND_2GHZ)
+               return false;
+
+       return bt_activity >= BT_LOW_TRAFFIC;
+}
+
+void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
+{
+       iwl_mvm_bt_coex_notif_handle(mvm);
+}
+
+int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                     struct iwl_rx_cmd_buffer *rxb,
+                                     struct iwl_device_cmd *dev_cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+       u8 __maybe_unused lower_bound, upper_bound;
+       int ret;
+       u8 lut;
+
+       struct iwl_bt_coex_cmd_old *bt_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+
+       if (!IWL_MVM_BT_COEX_CORUNNING)
+               return 0;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Ignore updates if we are in force mode */
+       if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+               return 0;
+
+       if (ant_isolation ==  mvm->last_ant_isol)
+               return 0;
+
+       for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
+               if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
+                       break;
+
+       lower_bound = antenna_coupling_ranges[lut].range;
+
+       if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
+               upper_bound = antenna_coupling_ranges[lut + 1].range;
+       else
+               upper_bound = antenna_coupling_ranges[lut].range;
+
+       IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
+                      ant_isolation, lower_bound, upper_bound, lut);
+
+       mvm->last_ant_isol = ant_isolation;
+
+       if (mvm->last_corun_lut == lut)
+               return 0;
+
+       mvm->last_corun_lut = lut;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return 0;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
+       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+                                            BT_VALID_CORUN_LUT_20 |
+                                            BT_VALID_CORUN_LUT_40);
+
+       /* For the moment, use the same LUT for 20GHz and 40GHz */
+       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
+              sizeof(bt_cmd->bt4_corun_lut20));
+
+       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
+              sizeof(bt_cmd->bt4_corun_lut40));
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
index 51685693af2e47e7ddfb7491bf8f74f0a1d745ae..ca79f7160573445110485b313d9a31a871d990b2 100644 (file)
@@ -79,6 +79,8 @@
 #define IWL_MVM_PS_SNOOZE_WINDOW               50
 #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW                25
 #define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT       64
+#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH      62
+#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH     65
 #define IWL_MVM_BT_COEX_SYNC2SCO               1
 #define IWL_MVM_BT_COEX_CORUNNING              1
 #define IWL_MVM_BT_COEX_MPLUT                  1
index 29ca72695eaa60e0f53121dd45f1d080cdefba1d..f131ef0ec5b30a482311a3ce9b7dec331965534b 100644 (file)
@@ -312,20 +312,69 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
                                         BT_MBOX_MSG(notif, _num, _field),  \
                                         true ? "\n" : ", ");
 
-static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
-                                      size_t count, loff_t *ppos)
+static
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
+                          int pos, int bufsz)
 {
-       struct iwl_mvm *mvm = file->private_data;
-       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
-       char *buf;
-       int ret, pos = 0, bufsz = sizeof(char) * 1024;
+       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
 
-       buf = kmalloc(bufsz, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
+       BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
+       BT_MBOX_PRINT(0, LE_PROF1, false);
+       BT_MBOX_PRINT(0, LE_PROF2, false);
+       BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
+       BT_MBOX_PRINT(0, CHL_SEQ_N, false);
+       BT_MBOX_PRINT(0, INBAND_S, false);
+       BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
+       BT_MBOX_PRINT(0, LE_SCAN, false);
+       BT_MBOX_PRINT(0, LE_ADV, false);
+       BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
+       BT_MBOX_PRINT(0, OPEN_CON_1, true);
 
-       mutex_lock(&mvm->mutex);
+       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
+
+       BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
+       BT_MBOX_PRINT(1, IP_SR, false);
+       BT_MBOX_PRINT(1, LE_MSTR, false);
+       BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
+       BT_MBOX_PRINT(1, MSG_TYPE, false);
+       BT_MBOX_PRINT(1, SSN, true);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
+
+       BT_MBOX_PRINT(2, SNIFF_ACT, false);
+       BT_MBOX_PRINT(2, PAG, false);
+       BT_MBOX_PRINT(2, INQUIRY, false);
+       BT_MBOX_PRINT(2, CONN, false);
+       BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
+       BT_MBOX_PRINT(2, DISC, false);
+       BT_MBOX_PRINT(2, SCO_TX_ACT, false);
+       BT_MBOX_PRINT(2, SCO_RX_ACT, false);
+       BT_MBOX_PRINT(2, ESCO_RE_TX, false);
+       BT_MBOX_PRINT(2, SCO_DURATION, true);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
+
+       BT_MBOX_PRINT(3, SCO_STATE, false);
+       BT_MBOX_PRINT(3, SNIFF_STATE, false);
+       BT_MBOX_PRINT(3, A2DP_STATE, false);
+       BT_MBOX_PRINT(3, ACL_STATE, false);
+       BT_MBOX_PRINT(3, MSTR_STATE, false);
+       BT_MBOX_PRINT(3, OBX_STATE, false);
+       BT_MBOX_PRINT(3, OPEN_CON_2, false);
+       BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
+       BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
+       BT_MBOX_PRINT(3, INBAND_P, false);
+       BT_MBOX_PRINT(3, MSG_TYPE_2, false);
+       BT_MBOX_PRINT(3, SSN_2, false);
+       BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
+
+       return pos;
+}
 
+static
+int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
+                              char *buf, int pos, int bufsz)
+{
        pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
 
        BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
@@ -378,25 +427,59 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
        BT_MBOX_PRINT(3, SSN_2, false);
        BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
-                        notif->bt_status);
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
-                        notif->bt_open_conn);
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
-                        notif->bt_traffic_load);
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
-                        notif->bt_agg_traffic_load);
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                        notif->bt_ci_compliance);
-       pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
-                        le32_to_cpu(notif->primary_ch_lut));
-       pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
-                        le32_to_cpu(notif->secondary_ch_lut));
-       pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
-                        le32_to_cpu(notif->bt_activity_grading));
-       pos += scnprintf(buf+pos, bufsz-pos,
-                        "antenna isolation = %d CORUN LUT index = %d\n",
-                        mvm->last_ant_isol, mvm->last_corun_lut);
+       return pos;
+}
+
+static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char *buf;
+       int ret, pos = 0, bufsz = sizeof(char) * 1024;
+
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               struct iwl_bt_coex_profile_notif_old *notif =
+                       &mvm->last_bt_notif_old;
+
+               pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
+
+               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
+                                notif->bt_ci_compliance);
+               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+                                le32_to_cpu(notif->primary_ch_lut));
+               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+                                le32_to_cpu(notif->secondary_ch_lut));
+               pos += scnprintf(buf+pos,
+                                bufsz-pos, "bt_activity_grading = %d\n",
+                                le32_to_cpu(notif->bt_activity_grading));
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "antenna isolation = %d CORUN LUT index = %d\n",
+                                mvm->last_ant_isol, mvm->last_corun_lut);
+       } else {
+               struct iwl_bt_coex_profile_notif *notif =
+                       &mvm->last_bt_notif;
+
+               pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+               pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
+                                notif->bt_ci_compliance);
+               pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+                                le32_to_cpu(notif->primary_ch_lut));
+               pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+                                le32_to_cpu(notif->secondary_ch_lut));
+               pos += scnprintf(buf+pos,
+                                bufsz-pos, "bt_activity_grading = %d\n",
+                                le32_to_cpu(notif->bt_activity_grading));
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "antenna isolation = %d CORUN LUT index = %d\n",
+                                mvm->last_ant_isol, mvm->last_corun_lut);
+       }
 
        mutex_unlock(&mvm->mutex);
 
@@ -411,28 +494,48 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
        char buf[256];
        int bufsz = sizeof(buf);
        int pos = 0;
 
        mutex_lock(&mvm->mutex);
 
-       pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
-       pos += scnprintf(buf+pos, bufsz-pos,
-                      "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
-                      le64_to_cpu(cmd->bt_primary_ci),
-                      !!cmd->co_run_bw_primary);
-       pos += scnprintf(buf+pos, bufsz-pos,
-                      "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
-                      le64_to_cpu(cmd->bt_secondary_ci),
-                      !!cmd->co_run_bw_secondary);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
-       pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
-                        iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
-       pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
-                        iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
+
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "Channel inhibition CMD\n");
+               pos += scnprintf(buf+pos, bufsz-pos,
+                              "\tPrimary Channel Bitmap 0x%016llx\n",
+                              le64_to_cpu(cmd->bt_primary_ci));
+               pos += scnprintf(buf+pos, bufsz-pos,
+                              "\tSecondary Channel Bitmap 0x%016llx\n",
+                              le64_to_cpu(cmd->bt_secondary_ci));
+
+               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+                                iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+                                iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+       } else {
+               struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+
+               pos += scnprintf(buf+pos, bufsz-pos,
+                                "Channel inhibition CMD\n");
+               pos += scnprintf(buf+pos, bufsz-pos,
+                              "\tPrimary Channel Bitmap 0x%016llx\n",
+                              le64_to_cpu(cmd->bt_primary_ci));
+               pos += scnprintf(buf+pos, bufsz-pos,
+                              "\tSecondary Channel Bitmap 0x%016llx\n",
+                              le64_to_cpu(cmd->bt_secondary_ci));
+
+               pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+               pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+                                iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+               pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+                                iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+       }
 
        mutex_unlock(&mvm->mutex);
 
@@ -455,6 +558,43 @@ iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
        return count;
 }
 
+static ssize_t
+iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
+                            size_t count, loff_t *ppos)
+{
+       static const char * const modes_str[BT_FORCE_ANT_MAX] = {
+               [BT_FORCE_ANT_DIS] = "dis",
+               [BT_FORCE_ANT_AUTO] = "auto",
+               [BT_FORCE_ANT_BT] = "bt",
+               [BT_FORCE_ANT_WIFI] = "wifi",
+       };
+       int ret, bt_force_ant_mode;
+
+       for (bt_force_ant_mode = 0;
+            bt_force_ant_mode < ARRAY_SIZE(modes_str);
+            bt_force_ant_mode++) {
+               if (!strcmp(buf, modes_str[bt_force_ant_mode]))
+                       break;
+       }
+
+       if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
+               return -EINVAL;
+
+       ret = 0;
+       mutex_lock(&mvm->mutex);
+       if (mvm->bt_force_ant_mode == bt_force_ant_mode)
+               goto out;
+
+       mvm->bt_force_ant_mode = bt_force_ant_mode;
+       IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
+                      modes_str[mvm->bt_force_ant_mode]);
+       ret = iwl_send_bt_init_conf(mvm);
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
 #define PRINT_STATS_LE32(_str, _val)                                   \
                         pos += scnprintf(buf + pos, bufsz - pos,       \
                                          fmt_table, _str,              \
@@ -1101,6 +1241,7 @@ MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
 MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 
@@ -1142,6 +1283,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
                             S_IWUSR | S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
index 5fe82c29c8ad07bcb7bab43726a4e09b136d8b53..ab12aaa43034765e2c48cad43de2813b7f6ace7f 100644 (file)
  * enum iwl_bt_coex_flags - flags for BT_COEX command
  * @BT_COEX_MODE_POS:
  * @BT_COEX_MODE_MSK:
- * @BT_COEX_DISABLE:
- * @BT_COEX_2W:
- * @BT_COEX_3W:
- * @BT_COEX_NW:
+ * @BT_COEX_DISABLE_OLD:
+ * @BT_COEX_2W_OLD:
+ * @BT_COEX_3W_OLD:
+ * @BT_COEX_NW_OLD:
+ * @BT_COEX_AUTO_OLD:
+ * @BT_COEX_BT_OLD: Antenna is for BT (manufacturing tests)
+ * @BT_COEX_WIFI_OLD: Antenna is for WiFi (manufacturing tests)
  * @BT_COEX_SYNC2SCO:
  * @BT_COEX_CORUNNING:
  * @BT_COEX_MPLUT:
 enum iwl_bt_coex_flags {
        BT_COEX_MODE_POS                = 3,
        BT_COEX_MODE_MSK                = BITS(3) << BT_COEX_MODE_POS,
-       BT_COEX_DISABLE                 = 0x0 << BT_COEX_MODE_POS,
-       BT_COEX_2W                      = 0x1 << BT_COEX_MODE_POS,
-       BT_COEX_3W                      = 0x2 << BT_COEX_MODE_POS,
-       BT_COEX_NW                      = 0x3 << BT_COEX_MODE_POS,
+       BT_COEX_DISABLE_OLD             = 0x0 << BT_COEX_MODE_POS,
+       BT_COEX_2W_OLD                  = 0x1 << BT_COEX_MODE_POS,
+       BT_COEX_3W_OLD                  = 0x2 << BT_COEX_MODE_POS,
+       BT_COEX_NW_OLD                  = 0x3 << BT_COEX_MODE_POS,
+       BT_COEX_AUTO_OLD                = 0x5 << BT_COEX_MODE_POS,
+       BT_COEX_BT_OLD                  = 0x6 << BT_COEX_MODE_POS,
+       BT_COEX_WIFI_OLD                = 0x7 << BT_COEX_MODE_POS,
        BT_COEX_SYNC2SCO                = BIT(7),
        BT_COEX_CORUNNING               = BIT(8),
        BT_COEX_MPLUT                   = BIT(9),
@@ -151,7 +157,7 @@ enum iwl_bt_coex_lut_type {
 #define BT_REDUCED_TX_POWER_BIT BIT(7)
 
 /**
- * struct iwl_bt_coex_cmd - bt coex configuration command
+ * struct iwl_bt_coex_cmd_old - bt coex configuration command
  * @flags:&enum iwl_bt_coex_flags
  * @max_kill:
  * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
@@ -176,7 +182,7 @@ enum iwl_bt_coex_lut_type {
  *
  * The structure is used for the BT_COEX command.
  */
-struct iwl_bt_coex_cmd {
+struct iwl_bt_coex_cmd_old {
        __le32 flags;
        u8 max_kill;
        u8 bt_reduced_tx_power;
@@ -202,26 +208,117 @@ struct iwl_bt_coex_cmd {
        __le32 valid_bit_msk;
 } __packed; /* BT_COEX_CMD_API_S_VER_5 */
 
+enum iwl_bt_coex_mode {
+       BT_COEX_DISABLE                 = 0x0,
+       BT_COEX_NW                      = 0x1,
+       BT_COEX_BT                      = 0x2,
+       BT_COEX_WIFI                    = 0x3,
+}; /* BT_COEX_MODES_E */
+
+enum iwl_bt_coex_enabled_modules {
+       BT_COEX_MPLUT_ENABLED           = BIT(0),
+       BT_COEX_MPLUT_BOOST_ENABLED     = BIT(1),
+       BT_COEX_SYNC2SCO_ENABLED        = BIT(2),
+       BT_COEX_CORUN_ENABLED           = BIT(3),
+       BT_COEX_HIGH_BAND_RET           = BIT(4),
+}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @mode: enum %iwl_bt_coex_mode
+ * @enabled_modules: enum %iwl_bt_coex_enabled_modules
+ * @max_kill: max count of Tx retries due to kill from PTA
+ * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT
+ *     should be set by default
+ * @bt4_antenna_isolation_thr: antenna threshold value
+ * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency
+ * @bt4_tx_rx_max_freq0: TxRx max frequency
+ * @multiprio_lut: multi priority LUT configuration
+ * @mplut_prio_boost: BT priority boost registers
+ * @decision_lut: PTA decision LUT, per Prio-Ch
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+       __le32 mode;
+       __le32 enabled_modules;
+
+       __le32 max_kill;
+       __le32 override_primary_lut;
+       __le32 override_secondary_lut;
+       __le32 bt4_antenna_isolation_thr;
+
+       __le32 bt4_tx_tx_delta_freq_thr;
+       __le32 bt4_tx_rx_max_freq0;
+
+       __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+       __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE];
+
+       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut
+ * @corun_lut20: co-running 20 MHz LUT configuration
+ * @corun_lut40: co-running 40 MHz LUT configuration
+ *
+ * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command.
+ */
+struct iwl_bt_coex_corun_lut_update_cmd {
+       __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+       __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */
+
+/**
+ * struct iwl_bt_coex_sw_boost - SW boost values
+ * @wifi_tx_prio_boost: SW boost of wifi tx priority
+ * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK.
+ * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS.
+ */
+struct iwl_bt_coex_sw_boost {
+       __le32 wifi_tx_prio_boost;
+       __le32 wifi_rx_prio_boost;
+       __le32 kill_ack_msk;
+       __le32 kill_cts_msk;
+};
+
+/**
+ * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost
+ * @boost_values: see struct %iwl_bt_coex_sw_boost - one for each channel
+ *     primary / secondary / low priority
+ */
+struct iwl_bt_coex_sw_boost_update_cmd {
+       struct iwl_bt_coex_sw_boost boost_values[3];
+} __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */
+
+/**
+ * struct iwl_bt_coex_reduced_txp_update_cmd
+ * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the
+ *     bits are the sta_id (value)
+ */
+struct iwl_bt_coex_reduced_txp_update_cmd {
+       __le32 reduced_txp;
+} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */
+
 /**
  * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
  * @bt_primary_ci:
- * @bt_secondary_ci:
- * @co_run_bw_primary:
- * @co_run_bw_secondary:
  * @primary_ch_phy_id:
+ * @bt_secondary_ci:
  * @secondary_ch_phy_id:
  *
  * Used for BT_COEX_CI command
  */
 struct iwl_bt_coex_ci_cmd {
        __le64 bt_primary_ci;
-       __le64 bt_secondary_ci;
+       __le32 primary_ch_phy_id;
 
-       u8 co_run_bw_primary;
-       u8 co_run_bw_secondary;
-       u8 primary_ch_phy_id;
-       u8 secondary_ch_phy_id;
-} __packed; /* BT_CI_MSG_API_S_VER_1 */
+       __le64 bt_secondary_ci;
+       __le32 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_2 */
 
 #define BT_MBOX(n_dw, _msg, _pos, _nbits)      \
        BT_MBOX##n_dw##_##_msg##_POS = (_pos),  \
@@ -290,33 +387,40 @@ enum iwl_bt_activity_grading {
        BT_HIGH_TRAFFIC         = 3,
 }; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
 
+enum iwl_bt_ci_compliance {
+       BT_CI_COMPLIANCE_NONE           = 0,
+       BT_CI_COMPLIANCE_PRIMARY        = 1,
+       BT_CI_COMPLIANCE_SECONDARY      = 2,
+       BT_CI_COMPLIANCE_BOTH           = 3,
+}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */
+
+#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id)   \
+               (_ttc_rrc_status & BIT(_phy_id))
+
+#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id)   \
+               ((_ttc_rrc_status >> 4) & BIT(_phy_id))
+
 /**
  * struct iwl_bt_coex_profile_notif - notification about BT coex
  * @mbox_msg: message from BT to WiFi
  * @msg_idx: the index of the message
- * @bt_status: 0 - off, 1 - on
- * @bt_open_conn: number of BT connections open
- * @bt_traffic_load: load of BT traffic
- * @bt_agg_traffic_load: aggregated load of BT traffic
- * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
- * @primary_ch_lut: LUT used for primary channel
- * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_ci_compliance: enum %iwl_bt_ci_compliance
+ * @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type
+ * @secondary_ch_lut: LUT used for secondary channel enum %iwl_bt_coex_lut_type
  * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
+ * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY
  */
 struct iwl_bt_coex_profile_notif {
        __le32 mbox_msg[4];
        __le32 msg_idx;
-       u8 bt_status;
-       u8 bt_open_conn;
-       u8 bt_traffic_load;
-       u8 bt_agg_traffic_load;
-       u8 bt_ci_compliance;
-       u8 reserved[3];
+       __le32 bt_ci_compliance;
 
        __le32 primary_ch_lut;
        __le32 secondary_ch_lut;
        __le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
+       u8 ttc_rrc_status;
+       u8 reserved[3];
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
 
 enum iwl_bt_coex_prio_table_event {
        BT_COEX_PRIO_TBL_EVT_INIT_CALIB1                = 0,
@@ -355,4 +459,54 @@ struct iwl_bt_coex_prio_tbl_cmd {
        u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
 } __packed;
 
+/**
+ * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd_old {
+       __le64 bt_primary_ci;
+       __le64 bt_secondary_ci;
+
+       u8 co_run_bw_primary;
+       u8 co_run_bw_secondary;
+       u8 primary_ch_phy_id;
+       u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
+/**
+ * struct iwl_bt_coex_profile_notif_old - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
+ */
+struct iwl_bt_coex_profile_notif_old {
+       __le32 mbox_msg[4];
+       __le32 msg_idx;
+       u8 bt_status;
+       u8 bt_open_conn;
+       u8 bt_traffic_load;
+       u8 bt_agg_traffic_load;
+       u8 bt_ci_compliance;
+       u8 ttc_enabled;
+       __le16 reserved;
+
+       __le32 primary_ch_lut;
+       __le32 secondary_ch_lut;
+       __le32 bt_activity_grading;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */
+
 #endif /* __fw_api_bt_coex_h__ */
index cbbcd8e284e4bec620eb7e272b02941b3805aa0b..c3a8c86b550d45aaca72a274252960d8397d0ef6 100644 (file)
@@ -336,7 +336,7 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_DEBUG_FLAG_D0I3 0
 
 #define IWL_BF_ESCAPE_TIMER_DEFAULT 50
-#define IWL_BF_ESCAPE_TIMER_D0I3 1024
+#define IWL_BF_ESCAPE_TIMER_D0I3 0
 #define IWL_BF_ESCAPE_TIMER_MAX 1024
 #define IWL_BF_ESCAPE_TIMER_MIN 0
 
index 6959fda3fe09d09e34d5fe19c7ded403fba79c37..c02a9e45ec5eaec78ce9ef49c47957c6fcd2c7da 100644 (file)
@@ -169,19 +169,13 @@ enum iwl_scan_type {
        SCAN_TYPE_DISCOVERY_FORCED      = 6,
 }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
 
-/**
- * Maximal number of channels to scan
- * it should be equal to:
- * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
- */
-#define MAX_NUM_SCAN_CHANNELS 50
-
 /**
  * struct iwl_scan_cmd - scan request command
  * ( SCAN_REQUEST_CMD = 0x80 )
  * @len: command length in bytes
  * @scan_flags: scan flags from SCAN_FLAGS_*
- * @channel_count: num of channels in channel list (1 - MAX_NUM_SCAN_CHANNELS)
+ * @channel_count: num of channels in channel list
+ *     (1 - ucode_capa.n_scan_channels)
  * @quiet_time: in msecs, dwell this time for active scan on quiet channels
  * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
  *     this number of packets were received (typically 1)
@@ -345,7 +339,7 @@ struct iwl_scan_results_notif {
  * @last_channel: last channel that was scanned
  * @tsf_low: TSF timer (lower half) in usecs
  * @tsf_high: TSF timer (higher half) in usecs
- * @results: all scan results, only "scanned_channels" of them are valid
+ * @results: array of scan results, only "scanned_channels" of them are valid
  */
 struct iwl_scan_complete_notif {
        u8 scanned_channels;
@@ -354,11 +348,10 @@ struct iwl_scan_complete_notif {
        u8 last_channel;
        __le32 tsf_low;
        __le32 tsf_high;
-       struct iwl_scan_results_notif results[MAX_NUM_SCAN_CHANNELS];
+       struct iwl_scan_results_notif results[];
 } __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
 
 /* scan offload */
-#define IWL_MAX_SCAN_CHANNELS          40
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN   16
 #define IWL_SCAN_MAX_PROFILES          11
@@ -423,36 +416,24 @@ enum iwl_scan_offload_channel_flags {
        IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
 };
 
-/**
- * iwl_scan_channel_cfg - SCAN_CHANNEL_CFG_S
- * @type:              bitmap - see enum iwl_scan_offload_channel_flags.
- *                     0:      passive (0) or active (1) scan.
- *                     1-20:   directed scan to i'th ssid.
- *                     22:     channel width configuation - 1 for narrow.
- *                     24:     full scan.
- *                     25:     partial scan.
- * @channel_number:    channel number 1-13 etc.
- * @iter_count:                repetition count for the channel.
- * @iter_interval:     interval between two innteration on one channel.
- * @dwell_time:        entry 0 - active scan, entry 1 - passive scan.
+/* channel configuration for struct iwl_scan_offload_cfg. Each channel needs:
+ * __le32 type:        bitmap; bits 1-20 are for directed scan to i'th ssid and
+ *     see enum iwl_scan_offload_channel_flags.
+ * __le16 channel_number: channel number 1-13 etc.
+ * __le16 iter_count: repetition count for the channel.
+ * __le32 iter_interval: interval between two iterations on one channel.
+ * u8 active_dwell.
+ * u8 passive_dwell.
  */
-struct iwl_scan_channel_cfg {
-       __le32 type[IWL_MAX_SCAN_CHANNELS];
-       __le16 channel_number[IWL_MAX_SCAN_CHANNELS];
-       __le16 iter_count[IWL_MAX_SCAN_CHANNELS];
-       __le32 iter_interval[IWL_MAX_SCAN_CHANNELS];
-       u8 dwell_time[IWL_MAX_SCAN_CHANNELS][2];
-} __packed;
+#define IWL_SCAN_CHAN_SIZE 14
 
 /**
  * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
  * @scan_cmd:          scan command fixed part
- * @channel_cfg:       scan channel configuration
- * @data:              probe request frames (one per band)
+ * @data:              scan channel configuration and probe request frames
  */
 struct iwl_scan_offload_cfg {
        struct iwl_scan_offload_cmd scan_cmd;
-       struct iwl_scan_channel_cfg channel_cfg;
        u8 data[0];
 } __packed;
 
@@ -528,7 +509,7 @@ struct iwl_scan_offload_profile_cfg {
  * @full_scan_mul:     number of partial scans before each full scan
  */
 struct iwl_scan_offload_schedule {
-       u16 delay;
+       __le16 delay;
        u8 iterations;
        u8 full_scan_mul;
 } __packed;
@@ -601,4 +582,211 @@ struct iwl_sched_scan_results {
        u8 reserved;
 };
 
+/* Unified LMAC scan API */
+
+#define IWL_MVM_BASIC_PASSIVE_DWELL 110
+
+/**
+ * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *     cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_req_tx_cmd {
+       __le32 tx_flags;
+       __le32 rate_n_flags;
+       u8 sta_id;
+       u8 reserved[3];
+} __packed;
+
+enum iwl_scan_channel_flags_lmac {
+       IWL_UNIFIED_SCAN_CHANNEL_FULL           = BIT(27),
+       IWL_UNIFIED_SCAN_CHANNEL_PARTIAL        = BIT(28),
+};
+
+/**
+ * iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags:             bits 1-20: directed scan to i'th ssid
+ *                     other bits &enum iwl_scan_channel_flags_lmac
+ * @channel_number:    channel number 1-13 etc
+ * @iter_count:                scan iteration on this channel
+ * @iter_interval:     interval in seconds between iterations on one channel
+ */
+struct iwl_scan_channel_cfg_lmac {
+       __le32 flags;
+       __le16 channel_num;
+       __le16 iter_count;
+       __le32 iter_interval;
+} __packed;
+
+/*
+ * iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwl_scan_probe_segment {
+       __le16 offset;
+       __le16 len;
+} __packed;
+
+/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwl_scan_probe_req {
+       struct iwl_scan_probe_segment mac_header;
+       struct iwl_scan_probe_segment band_data[2];
+       struct iwl_scan_probe_segment common_data;
+       u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+enum iwl_scan_channel_flags {
+       IWL_SCAN_CHANNEL_FLAG_EBS               = BIT(0),
+       IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE      = BIT(1),
+       IWL_SCAN_CHANNEL_FLAG_CACHE_ADD         = BIT(2),
+};
+
+/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: how many regular scan iteration before EBS
+ */
+struct iwl_scan_channel_opt {
+       __le16 flags;
+       __le16 non_ebs_ratio;
+} __packed;
+
+/**
+ * iwl_mvm_lmac_scan_flags
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
+ *     without filtering.
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
+ * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
+ * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
+ * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
+ * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
+ */
+enum iwl_mvm_lmac_scan_flags {
+       IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL         = BIT(0),
+       IWL_MVM_LMAC_SCAN_FLAG_PASSIVE          = BIT(1),
+       IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION   = BIT(2),
+       IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE    = BIT(3),
+       IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS   = BIT(4),
+       IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED       = BIT(5),
+};
+
+enum iwl_scan_priority {
+       IWL_SCAN_PRIORITY_LOW,
+       IWL_SCAN_PRIORITY_MEDIUM,
+       IWL_SCAN_PRIORITY_HIGH,
+};
+
+/**
+ * iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * @reserved1: for alignment and future use
+ * @channel_num: num of channels to scan
+ * @active_dwell: dwell time for active channels
+ * @passive_dwell: dwell time for passive channels
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @reserved2: for alignment and future use
+ * @rx_chain_select: PHY_RX_CHAIN_* flags
+ * @scan_flags: &enum iwl_mvm_lmac_scan_flags
+ * @max_out_time: max time (in TU) to be out of associated channel
+ * @suspend_time: pause scan this long (TUs) when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_prio: enum iwl_scan_priority
+ * @iter_num: number of scan iterations
+ * @delay: delay in seconds before first iteration
+ * @schedule: two scheduling plans. The first one is finite, the second one can
+ *     be infinite.
+ * @channel_opt: channel optimization options, for full and partial scan
+ * @data: channel configuration and probe request packet.
+ */
+struct iwl_scan_req_unified_lmac {
+       /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
+       __le32 reserved1;
+       u8 n_channels;
+       u8 active_dwell;
+       u8 passive_dwell;
+       u8 fragmented_dwell;
+       __le16 reserved2;
+       __le16 rx_chain_select;
+       __le32 scan_flags;
+       __le32 max_out_time;
+       __le32 suspend_time;
+       /* RX_ON_FLAGS_API_S_VER_1 */
+       __le32 flags;
+       __le32 filter_flags;
+       struct iwl_scan_req_tx_cmd tx_cmd[2];
+       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+       __le32 scan_prio;
+       /* SCAN_REQ_PERIODIC_PARAMS_API_S */
+       __le32 iter_num;
+       __le32 delay;
+       struct iwl_scan_offload_schedule schedule[2];
+       struct iwl_scan_channel_opt channel_opt[2];
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_lmac_scan_results_notif - scan results for one channel -
+ *     SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of request that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwl_lmac_scan_results_notif {
+       u8 channel;
+       u8 band;
+       u8 probe_status;
+       u8 num_probe_not_sent;
+       __le32 duration;
+} __packed;
+
+/**
+ * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels)
+ *     SCAN_COMPLETE_NTF_API_S_VER_3
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: an array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_lmac_scan_complete_notif {
+       u8 scanned_channels;
+       u8 status;
+       u8 bt_status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+       struct iwl_scan_results_notif results[];
+} __packed;
+
+/**
+ * iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwl_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwl_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ */
+struct iwl_periodic_scan_complete {
+       u8 last_schedule_line;
+       u8 last_schedule_iteration;
+       u8 status;
+       u8 ebs_status;
+       __le32 time_after_last_iter;
+       __le32 reserved;
+} __packed;
+
 #endif
index 39cebee8016feaab62f005e5e843447784594429..47bd0406355d2738f00477d237c57bcd3d2706f2 100644 (file)
@@ -67,7 +67,7 @@
  * enum iwl_sta_flags - flags for the ADD_STA host command
  * @STA_FLG_REDUCED_TX_PWR_CTRL:
  * @STA_FLG_REDUCED_TX_PWR_DATA:
- * @STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @STA_FLG_DISABLE_TX: set if TX should be disabled
  * @STA_FLG_PS: set if STA is in Power Save
  * @STA_FLG_INVALID: set if STA is invalid
  * @STA_FLG_DLP_EN: Direct Link Protocol is enabled
@@ -91,10 +91,7 @@ enum iwl_sta_flags {
        STA_FLG_REDUCED_TX_PWR_CTRL     = BIT(3),
        STA_FLG_REDUCED_TX_PWR_DATA     = BIT(6),
 
-       STA_FLG_FLG_ANT_A               = (1 << 4),
-       STA_FLG_FLG_ANT_B               = (2 << 4),
-       STA_FLG_FLG_ANT_MSK             = (STA_FLG_FLG_ANT_A |
-                                          STA_FLG_FLG_ANT_B),
+       STA_FLG_DISABLE_TX              = BIT(4),
 
        STA_FLG_PS                      = BIT(8),
        STA_FLG_DRAIN_FLOW              = BIT(12),
index 6cc5f52b807f1bc343ea632674e215423c3abb1d..d6073f67b212e12c2c7565cadf44c50268763cd8 100644 (file)
  * @TX_CMD_FLG_ACK: expect ACK from receiving station
  * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
  *     Otherwise, use rate_n_flags from the TX command
- * @TX_CMD_FLG_BA: this frame is a block ack
  * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
  *     Must set TX_CMD_FLG_ACK with this flag.
- * @TX_CMD_FLG_TXOP_PROT: protect frame with full TXOP protection
  * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
  * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
  * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
  * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
  *     Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
  * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
- * @TX_CMD_FLG_NEXT_FRAME: this frame includes information of the next frame
  * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
  *     Should be set for beacons and probe responses
  * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
  * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
- * @TX_CMD_FLG_AGG_START: allow this frame to start aggregation
  * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
  *     Should be set for 26/30 length MAC headers
  * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
@@ -103,7 +99,6 @@ enum iwl_tx_flags {
        TX_CMD_FLG_PROT_REQUIRE         = BIT(0),
        TX_CMD_FLG_ACK                  = BIT(3),
        TX_CMD_FLG_STA_RATE             = BIT(4),
-       TX_CMD_FLG_BA                   = BIT(5),
        TX_CMD_FLG_BAR                  = BIT(6),
        TX_CMD_FLG_TXOP_PROT            = BIT(7),
        TX_CMD_FLG_VHT_NDPA             = BIT(8),
@@ -113,11 +108,9 @@ enum iwl_tx_flags {
        TX_CMD_FLG_BT_DIS               = BIT(12),
        TX_CMD_FLG_SEQ_CTL              = BIT(13),
        TX_CMD_FLG_MORE_FRAG            = BIT(14),
-       TX_CMD_FLG_NEXT_FRAME           = BIT(15),
        TX_CMD_FLG_TSF                  = BIT(16),
        TX_CMD_FLG_CALIB                = BIT(17),
        TX_CMD_FLG_KEEP_SEQ_CTL         = BIT(18),
-       TX_CMD_FLG_AGG_START            = BIT(19),
        TX_CMD_FLG_MH_PAD               = BIT(20),
        TX_CMD_FLG_RESP_TO_DRV          = BIT(21),
        TX_CMD_FLG_CCMP_AGG             = BIT(22),
@@ -191,8 +184,6 @@ enum iwl_tx_flags {
  * struct iwl_tx_cmd - TX command struct to FW
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
- * @next_frame_len: same as len, but for next frame (0 if not applicable)
- *     Used for fragmentation and bursting, but not in 11n aggregation.
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
  *     cleared. Combination of RATE_MCS_*
@@ -210,8 +201,6 @@ enum iwl_tx_flags {
  * @data_retry_limit: max attempts to send the data packet
  * @tid_spec: TID/tspec
  * @pm_frame_timeout: PM TX frame timeout
- * @driver_txop: duration od EDCA TXOP, in 32-usec units. Set this if not
- *     specified by HCCA protocol
  *
  * The byte count (both len and next_frame_len) includes MAC header
  * (24/26/30/32 bytes)
@@ -241,8 +230,7 @@ struct iwl_tx_cmd {
        u8 initial_rate_index;
        u8 reserved2;
        u8 key[16];
-       __le16 next_frame_flags;
-       __le16 reserved3;
+       __le32 reserved3;
        __le32 life_time;
        __le32 dram_lsb_ptr;
        u8 dram_msb_ptr;
@@ -250,7 +238,7 @@ struct iwl_tx_cmd {
        u8 data_retry_limit;
        u8 tid_tspec;
        __le16 pm_frame_timeout;
-       __le16 driver_txop;
+       __le16 reserved4;
        u8 payload[0];
        struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_3 */
@@ -548,6 +536,20 @@ struct iwl_beacon_notif {
        __le32 ibss_mgr_status;
 } __packed;
 
+/**
+ * struct iwl_extended_beacon_notif - notifies about beacon transmission
+ * @beacon_notify_hdr: tx response command associated with the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ * @gp2: last beacon time in gp2
+ */
+struct iwl_extended_beacon_notif {
+       struct iwl_mvm_tx_resp beacon_notify_hdr;
+       __le64 tsf;
+       __le32 ibss_mgr_status;
+       __le32 gp2;
+} __packed; /* BEACON_NTFY_API_S_VER_5 */
+
 /**
  * enum iwl_dump_control - dump (flush) control flags
  * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty
index 309a9b9a94fecc26918f967e7b9e7a01374d43b3..b8e4e78d601b98b6e67a444f5f7cb09985a7bef1 100644 (file)
@@ -86,6 +86,8 @@ enum {
 
 #define IWL_MVM_STATION_COUNT  16
 
+#define IWL_MVM_TDLS_STA_COUNT 4
+
 /* commands */
 enum {
        MVM_ALIVE = 0x1,
@@ -135,6 +137,7 @@ enum {
        SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
        SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
        MATCH_FOUND_NOTIFICATION = 0xd9,
+       SCAN_ITERATION_COMPLETE = 0xe7,
 
        /* Phy */
        PHY_CONFIGURATION_CMD = 0x6a,
@@ -163,7 +166,6 @@ enum {
        BEACON_NOTIFICATION = 0x90,
        BEACON_TEMPLATE_CMD = 0x91,
        TX_ANT_CONFIGURATION_CMD = 0x98,
-       BT_CONFIG = 0x9b,
        STATISTICS_NOTIFICATION = 0x9d,
        EOSP_NOTIFICATION = 0x9e,
        REDUCE_TX_POWER_CMD = 0x9f,
@@ -185,6 +187,10 @@ enum {
        BT_COEX_PRIO_TABLE = 0xcc,
        BT_COEX_PROT_ENV = 0xcd,
        BT_PROFILE_NOTIFICATION = 0xce,
+       BT_CONFIG = 0x9b,
+       BT_COEX_UPDATE_SW_BOOST = 0x5a,
+       BT_COEX_UPDATE_CORUN_LUT = 0x5b,
+       BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
        BT_COEX_CI = 0x5d,
 
        REPLY_SF_CFG_CMD = 0xd1,
@@ -534,6 +540,9 @@ enum iwl_time_event_type {
        /* WiDi Sync Events */
        TE_WIDI_TX_SYNC,
 
+       /* Channel Switch NoA */
+       TE_P2P_GO_CSA_NOA,
+
        TE_MAX
 }; /* MAC_EVENT_TYPE_API_E_VER_1 */
 
index 725ba49576bf640a41194cc4539176f70118c9d0..96b9cf8137e7f158df7e7db16cc32a779ca1f2be 100644 (file)
@@ -67,6 +67,7 @@
 #include "iwl-prph.h"
 #include "fw-api.h"
 #include "mvm.h"
+#include "time-event.h"
 
 const u8 iwl_mvm_ac_to_tx_fifo[] = {
        IWL_MVM_TX_FIFO_VO,
@@ -903,7 +904,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
        struct iwl_mac_beacon_cmd beacon_cmd = {};
        struct ieee80211_tx_info *info;
        u32 beacon_skb_len;
-       u32 rate;
+       u32 rate, tx_flags;
 
        if (WARN_ON(!beacon))
                return -EINVAL;
@@ -913,14 +914,17 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
        /* TODO: for now the beacon template id is set to be the mac context id.
         * Might be better to handle it as another resource ... */
        beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+       info = IEEE80211_SKB_CB(beacon);
 
        /* Set up TX command fields */
        beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len);
        beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id;
        beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
-       beacon_cmd.tx.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
-                                            TX_CMD_FLG_BT_DIS  |
-                                            TX_CMD_FLG_TSF);
+       tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
+       tx_flags |=
+               iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
+                                               TX_CMD_FLG_BT_PRIO_POS;
+       beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags);
 
        mvm->mgmt_last_antenna_idx =
                iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
@@ -930,8 +934,6 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
                            RATE_MCS_ANT_POS);
 
-       info = IEEE80211_SKB_CB(beacon);
-
        if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
                rate = IWL_FIRST_OFDM_RATE;
        } else {
@@ -968,7 +970,7 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
        WARN_ON(vif->type != NL80211_IFTYPE_AP &&
                vif->type != NL80211_IFTYPE_ADHOC);
 
-       beacon = ieee80211_beacon_get(mvm->hw, vif);
+       beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
        if (!beacon)
                return -ENOMEM;
 
@@ -1199,31 +1201,94 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        return 0;
 }
 
+static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *csa_vif, u32 gp2)
+{
+       struct iwl_mvm_vif *mvmvif =
+                       iwl_mvm_vif_from_mac80211(csa_vif);
+
+       if (!ieee80211_csa_is_complete(csa_vif)) {
+               int c = ieee80211_csa_update_counter(csa_vif);
+
+               iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
+               if (csa_vif->p2p &&
+                   !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2) {
+                       u32 rel_time = (c + 1) *
+                                      csa_vif->bss_conf.beacon_int -
+                                      IWL_MVM_CHANNEL_SWITCH_TIME;
+                       u32 apply_time = gp2 + rel_time * 1024;
+
+                       iwl_mvm_schedule_csa_noa(mvm, csa_vif,
+                                                IWL_MVM_CHANNEL_SWITCH_TIME -
+                                                IWL_MVM_CHANNEL_SWITCH_MARGIN,
+                                                apply_time);
+               }
+       } else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
+               /* we don't have CSA NoA scheduled yet, switch now */
+               ieee80211_csa_finish(csa_vif);
+               RCU_INIT_POINTER(mvm->csa_vif, NULL);
+       }
+}
+
 int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                            struct iwl_rx_cmd_buffer *rxb,
                            struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_beacon_notif *beacon = (void *)pkt->data;
-       u16 status __maybe_unused =
-               le16_to_cpu(beacon->beacon_notify_hdr.status.status);
-       u32 rate __maybe_unused =
-               le32_to_cpu(beacon->beacon_notify_hdr.initial_rate);
+       struct iwl_mvm_tx_resp *beacon_notify_hdr;
+       struct ieee80211_vif *csa_vif;
+       struct ieee80211_vif *tx_blocked_vif;
+       u64 tsf;
 
        lockdep_assert_held(&mvm->mutex);
 
-       IWL_DEBUG_RX(mvm, "beacon status %#x retries:%d tsf:0x%16llX rate:%d\n",
-                    status & TX_STATUS_MSK,
-                    beacon->beacon_notify_hdr.failure_frame,
-                    le64_to_cpu(beacon->tsf),
-                    rate);
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_CAPA_EXTENDED_BEACON) {
+               struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
 
-       if (unlikely(mvm->csa_vif && mvm->csa_vif->csa_active)) {
-               if (!ieee80211_csa_is_complete(mvm->csa_vif)) {
-                       iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm->csa_vif);
-               } else {
-                       ieee80211_csa_finish(mvm->csa_vif);
-                       mvm->csa_vif = NULL;
+               beacon_notify_hdr = &beacon->beacon_notify_hdr;
+               tsf = le64_to_cpu(beacon->tsf);
+               mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+       } else {
+               struct iwl_beacon_notif *beacon = (void *)pkt->data;
+
+               beacon_notify_hdr = &beacon->beacon_notify_hdr;
+               tsf = le64_to_cpu(beacon->tsf);
+       }
+
+       IWL_DEBUG_RX(mvm,
+                    "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
+                    le16_to_cpu(beacon_notify_hdr->status.status) &
+                                                               TX_STATUS_MSK,
+                    beacon_notify_hdr->failure_frame, tsf,
+                    mvm->ap_last_beacon_gp2,
+                    le32_to_cpu(beacon_notify_hdr->initial_rate));
+
+       csa_vif = rcu_dereference_protected(mvm->csa_vif,
+                                           lockdep_is_held(&mvm->mutex));
+       if (unlikely(csa_vif && csa_vif->csa_active))
+               iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2);
+
+       tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+                                               lockdep_is_held(&mvm->mutex));
+       if (unlikely(tx_blocked_vif)) {
+               struct iwl_mvm_vif *mvmvif =
+                       iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+               /*
+                * The channel switch is started and we have blocked the
+                * stations. If this is the first beacon (the timeout wasn't
+                * set), set the unblock timeout, otherwise countdown
+                */
+               if (!mvm->csa_tx_block_bcn_timeout)
+                       mvm->csa_tx_block_bcn_timeout =
+                               IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
+               else
+                       mvm->csa_tx_block_bcn_timeout--;
+
+               /* Check if the timeout is expired, and unblock tx */
+               if (mvm->csa_tx_block_bcn_timeout == 0) {
+                       iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+                       RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                }
        }
 
index 9bfb90680cdcb2e6d8d716a04798322c4c9b2174..2eb6ebee446708308168da65d926f394e743597d 100644 (file)
@@ -80,6 +80,8 @@
 #include "fw-api-scan.h"
 #include "iwl-phy-db.h"
 #include "testmode.h"
+#include "iwl-fw-error-dump.h"
+#include "iwl-prph.h"
 
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@ -241,6 +243,21 @@ iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
        }
 }
 
+static int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+       iwl_mvm_ref(mvm, ref_type);
+
+       if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+                               !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
+                               HZ)) {
+               WARN_ON_ONCE(1);
+               iwl_mvm_unref(mvm, ref_type);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
 {
        int i;
@@ -276,6 +293,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_TIMING_BEACON_ONLY |
                    IEEE80211_HW_CONNECTION_MONITOR |
+                   IEEE80211_HW_CHANCTX_STA_CSA |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
                    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
 
@@ -310,6 +328,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
        }
 
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+               hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
+
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
        hw->chanctx_data_size = sizeof(u16);
@@ -381,6 +402,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+                              NL80211_FEATURE_LOW_PRIORITY_SCAN |
                               NL80211_FEATURE_P2P_GO_OPPPS;
 
        mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -556,9 +578,6 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
        case IEEE80211_AMPDU_TX_OPERATIONAL:
-               iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
-               tx_agg_ref = true;
-
                /*
                 * for tx start, wait synchronously until D0i3 exit to
                 * get the correct sequence number for the tid.
@@ -567,12 +586,11 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                 * by the trans layer (unlike commands), so wait for
                 * d0i3 exit in these cases as well.
                 */
-               if (!wait_event_timeout(mvm->d0i3_exit_waitq,
-                         !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
-                       WARN_ON_ONCE(1);
-                       iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
-                       return -EIO;
-               }
+               ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
+               if (ret)
+                       return ret;
+
+               tx_agg_ref = true;
                break;
        default:
                break;
@@ -644,6 +662,104 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        mvmvif->phy_ctxt = NULL;
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
+{
+       struct iwl_fw_error_dump_file *dump_file;
+       struct iwl_fw_error_dump_data *dump_data;
+       struct iwl_fw_error_dump_info *dump_info;
+       const struct fw_img *img;
+       u32 sram_len, sram_ofs;
+       u32 file_len, rxf_len;
+       unsigned long flags;
+       u32 trans_len;
+       int reg_val;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvm->fw_error_dump)
+               return;
+
+       img = &mvm->fw->img[mvm->cur_ucode];
+       sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+       sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+       /* reading buffer size */
+       reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+       rxf_len = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+
+       /* the register holds the value divided by 128 */
+       rxf_len = rxf_len << 7;
+
+       file_len = sizeof(*dump_file) +
+                  sizeof(*dump_data) * 3 +
+                  sram_len +
+                  rxf_len +
+                  sizeof(*dump_info);
+
+       trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
+       if (trans_len)
+               file_len += trans_len;
+
+       dump_file = vzalloc(file_len);
+       if (!dump_file)
+               return;
+
+       mvm->fw_error_dump = dump_file;
+
+       dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+       dump_file->file_len = cpu_to_le32(file_len);
+       dump_data = (void *)dump_file->data;
+
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+       dump_data->len = cpu_to_le32(sizeof(*dump_info));
+       dump_info = (void *) dump_data->data;
+       dump_info->device_family =
+               mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
+                       cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+                       cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+       memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
+              sizeof(dump_info->fw_human_readable));
+       strncpy(dump_info->dev_human_readable, mvm->cfg->name,
+               sizeof(dump_info->dev_human_readable));
+       strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
+               sizeof(dump_info->bus_human_readable));
+
+       dump_data = iwl_fw_error_next_data(dump_data);
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+       dump_data->len = cpu_to_le32(rxf_len);
+
+       if (iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+               u32 *rxf = (void *)dump_data->data;
+               int i;
+
+               for (i = 0; i < (rxf_len / sizeof(u32)); i++) {
+                       iwl_trans_write_prph(mvm->trans,
+                                            RXF_LD_FENCE_OFFSET_ADDR,
+                                            i * sizeof(u32));
+                       rxf[i] = iwl_trans_read_prph(mvm->trans,
+                                                    RXF_FIFO_RD_FENCE_ADDR);
+               }
+               iwl_trans_release_nic_access(mvm->trans, &flags);
+       }
+
+       dump_data = iwl_fw_error_next_data(dump_data);
+       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
+       dump_data->len = cpu_to_le32(sram_len);
+       iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_data->data,
+                                sram_len);
+
+       if (trans_len) {
+               void *buf = iwl_fw_error_next_data(dump_data);
+               u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
+                                                        trans_len);
+               dump_data = (void *)((u8 *)buf + real_trans_len);
+               dump_file->file_len =
+                       cpu_to_le32(file_len - trans_len + real_trans_len);
+       }
+}
+#endif
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -695,6 +811,16 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
                iwl_mvm_restart_cleanup(mvm);
 
        ret = iwl_mvm_up(mvm);
+
+       if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+               /* Something went wrong - we need to finish some cleanup
+                * that normally iwl_mvm_mac_restart_complete() below
+                * would do.
+                */
+               clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+               iwl_mvm_d0i3_enable_tx(mvm, NULL);
+       }
+
        mutex_unlock(&mvm->mutex);
 
        return ret;
@@ -792,6 +918,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
+       /*
+        * make sure D0i3 exit is completed, otherwise a target access
+        * during tx queue configuration could be done when still in
+        * D0i3 state.
+        */
+       ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
+       if (ret)
+               return ret;
+
        /*
         * Not much to do here. The stack will not allow interface
         * types or combinations that we didn't advertise, so we
@@ -906,6 +1041,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
  out_unlock:
        mutex_unlock(&mvm->mutex);
 
+       iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
+
        return ret;
 }
 
@@ -1285,7 +1422,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        if (changes & BSS_CHANGED_ASSOC) {
                if (bss_conf->assoc) {
                        /* add quota for this interface */
-                       ret = iwl_mvm_update_quotas(mvm, vif);
+                       ret = iwl_mvm_update_quotas(mvm, NULL);
                        if (ret) {
                                IWL_ERR(mvm, "failed to update quotas\n");
                                return;
@@ -1432,7 +1569,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        /* power updated needs to be done before quotas */
        iwl_mvm_power_update_mac(mvm);
 
-       ret = iwl_mvm_update_quotas(mvm, vif);
+       ret = iwl_mvm_update_quotas(mvm, NULL);
        if (ret)
                goto out_quota_failed;
 
@@ -1470,7 +1607,20 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       /* Handle AP stop while in CSA */
+       if (rcu_access_pointer(mvm->csa_vif) == vif) {
+               iwl_mvm_remove_time_event(mvm, mvmvif,
+                                         &mvmvif->time_event_data);
+               RCU_INIT_POINTER(mvm->csa_vif, NULL);
+       }
+
+       if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
+               RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+               mvm->csa_tx_block_bcn_timeout = 0;
+       }
+
        mvmvif->ap_ibss_active = false;
+       mvm->ap_last_beacon_gp2 = 0;
 
        iwl_mvm_bt_coex_vif_change(mvm);
 
@@ -1524,7 +1674,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
        mutex_lock(&mvm->mutex);
 
        if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
-               iwl_mvm_sched_scan_stop(mvm, true);
+               iwl_mvm_scan_offload_stop(mvm, true);
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
@@ -1544,19 +1694,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
 
 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
-                              struct cfg80211_scan_request *req)
+                              struct ieee80211_scan_request *hw_req)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct cfg80211_scan_request *req = &hw_req->req;
        int ret;
 
-       if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
+       if (req->n_channels == 0 ||
+           req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
                return -EINVAL;
 
        mutex_lock(&mvm->mutex);
 
        switch (mvm->scan_status) {
        case IWL_MVM_SCAN_SCHED:
-               ret = iwl_mvm_sched_scan_stop(mvm, true);
+               ret = iwl_mvm_scan_offload_stop(mvm, true);
                if (ret) {
                        ret = -EBUSY;
                        goto out;
@@ -1571,7 +1723,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
-       ret = iwl_mvm_scan_request(mvm, vif, req);
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
+       else
+               ret = iwl_mvm_scan_request(mvm, vif, req);
+
        if (ret)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 out:
@@ -1687,6 +1843,70 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       int count = 0;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+               if (!sta || IS_ERR(sta) || !sta->tdls)
+                       continue;
+
+               if (vif) {
+                       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+                       if (mvmsta->vif != vif)
+                               continue;
+               }
+
+               count++;
+       }
+
+       return count;
+}
+
+static void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif,
+                                     bool sta_added)
+{
+       int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
+
+       /*
+        * Disable ps when the first TDLS sta is added and re-enable it
+        * when the last TDLS sta is removed
+        */
+       if ((tdls_sta_cnt == 1 && sta_added) ||
+           (tdls_sta_cnt == 0 && !sta_added))
+               iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+               if (!sta || IS_ERR(sta) || !sta->tdls)
+                       continue;
+
+               mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
+                               NL80211_TDLS_TEARDOWN,
+                               WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+                               GFP_KERNEL);
+       }
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
@@ -1725,7 +1945,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                        ret = -EINVAL;
                        goto out_unlock;
                }
+
+               if (sta->tdls &&
+                   (vif->p2p ||
+                    iwl_mvm_tdls_sta_count(mvm, NULL) ==
+                                               IWL_MVM_TDLS_STA_COUNT ||
+                    iwl_mvm_phy_ctx_count(mvm) > 1)) {
+                       IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
+                       ret = -EBUSY;
+                       goto out_unlock;
+               }
+
                ret = iwl_mvm_add_sta(mvm, vif, sta);
+               if (sta->tdls && ret == 0)
+                       iwl_mvm_recalc_tdls_state(mvm, vif, true);
        } else if (old_state == IEEE80211_STA_NONE &&
                   new_state == IEEE80211_STA_AUTH) {
                /*
@@ -1743,6 +1976,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                             true);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
+
+               /* we don't support TDLS during DCM */
+               if (iwl_mvm_phy_ctx_count(mvm) > 1)
+                       iwl_mvm_teardown_tdls_peers(mvm);
+
                /* enable beacon filtering */
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
                ret = 0;
@@ -1760,6 +1998,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        } else if (old_state == IEEE80211_STA_NONE &&
                   new_state == IEEE80211_STA_NOTEXIST) {
                ret = iwl_mvm_rm_sta(mvm, vif, sta);
+               if (sta->tdls)
+                       iwl_mvm_recalc_tdls_state(mvm, vif, false);
        } else {
                ret = -EIO;
        }
@@ -1831,10 +2071,22 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+                                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+
+       mutex_lock(&mvm->mutex);
+       /* Protect the session to hear the TDLS setup response on the channel */
+       iwl_mvm_protect_session(mvm, vif, duration, duration, 100);
+       mutex_unlock(&mvm->mutex);
+}
+
 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct cfg80211_sched_scan_request *req,
-                                       struct ieee80211_sched_scan_ies *ies)
+                                       struct ieee80211_scan_ies *ies)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
@@ -1872,15 +2124,21 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mvm->scan_status = IWL_MVM_SCAN_SCHED;
 
-       ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
-       if (ret)
-               goto err;
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+               ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+               if (ret)
+                       goto err;
+       }
 
        ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
        if (ret)
                goto err;
 
-       ret = iwl_mvm_sched_scan_start(mvm, req);
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
+       else
+               ret = iwl_mvm_sched_scan_start(mvm, req);
+
        if (!ret)
                goto out;
 err:
@@ -1899,7 +2157,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
        int ret;
 
        mutex_lock(&mvm->mutex);
-       ret = iwl_mvm_sched_scan_stop(mvm, false);
+       ret = iwl_mvm_scan_offload_stop(mvm, false);
        mutex_unlock(&mvm->mutex);
        iwl_mvm_wait_for_async_handlers(mvm);
 
@@ -2133,17 +2391,17 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
        return 0;
 }
 
-static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
-                              struct ieee80211_chanctx_conf *ctx)
+static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
+                                struct ieee80211_chanctx_conf *ctx)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
        struct iwl_mvm_phy_ctxt *phy_ctxt;
        int ret;
 
+       lockdep_assert_held(&mvm->mutex);
+
        IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
 
-       mutex_lock(&mvm->mutex);
        phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
        if (!phy_ctxt) {
                ret = -ENOSPC;
@@ -2161,19 +2419,40 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
        iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
        *phy_ctxt_id = phy_ctxt->id;
 out:
+       return ret;
+}
+
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+                              struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+       ret = __iwl_mvm_add_chanctx(mvm, ctx);
        mutex_unlock(&mvm->mutex);
+
        return ret;
 }
 
+static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
+                                    struct ieee80211_chanctx_conf *ctx)
+{
+       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+       struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+       lockdep_assert_held(&mvm->mutex);
+
+       iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
+}
+
 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
                                   struct ieee80211_chanctx_conf *ctx)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
-       struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
+       __iwl_mvm_remove_chanctx(mvm, ctx);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -2202,17 +2481,17 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
-static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
-                                     struct ieee80211_vif *vif,
-                                     struct ieee80211_chanctx_conf *ctx)
+static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct ieee80211_chanctx_conf *ctx,
+                                       bool switching_chanctx)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
        struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
-       mutex_lock(&mvm->mutex);
+       lockdep_assert_held(&mvm->mutex);
 
        mvmvif->phy_ctxt = phy_ctxt;
 
@@ -2229,18 +2508,18 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
                 * (in bss_info_changed), similarly for IBSS.
                 */
                ret = 0;
-               goto out_unlock;
+               goto out;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_MONITOR:
                break;
        default:
                ret = -EINVAL;
-               goto out_unlock;
+               goto out;
        }
 
        ret = iwl_mvm_binding_add_vif(mvm, vif);
        if (ret)
-               goto out_unlock;
+               goto out;
 
        /*
         * Power state must be updated before quotas,
@@ -2254,67 +2533,164 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
         */
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                mvmvif->monitor_active = true;
-               ret = iwl_mvm_update_quotas(mvm, vif);
+               ret = iwl_mvm_update_quotas(mvm, NULL);
                if (ret)
                        goto out_remove_binding;
        }
 
        /* Handle binding during CSA */
-       if (vif->type == NL80211_IFTYPE_AP) {
-               iwl_mvm_update_quotas(mvm, vif);
+       if ((vif->type == NL80211_IFTYPE_AP) ||
+           (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) {
+               iwl_mvm_update_quotas(mvm, NULL);
                iwl_mvm_mac_ctxt_changed(mvm, vif, false);
        }
 
-       goto out_unlock;
+       goto out;
 
- out_remove_binding:
+out_remove_binding:
        iwl_mvm_binding_remove_vif(mvm, vif);
        iwl_mvm_power_update_mac(mvm);
- out_unlock:
-       mutex_unlock(&mvm->mutex);
+out:
        if (ret)
                mvmvif->phy_ctxt = NULL;
        return ret;
 }
-
-static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
-                                        struct ieee80211_vif *vif,
-                                        struct ieee80211_chanctx_conf *ctx)
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     struct ieee80211_chanctx_conf *ctx)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
 
        mutex_lock(&mvm->mutex);
+       ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
+       mutex_unlock(&mvm->mutex);
+
+       return ret;
+}
+
+static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
+                                          struct ieee80211_vif *vif,
+                                          struct ieee80211_chanctx_conf *ctx,
+                                          bool switching_chanctx)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_vif *disabled_vif = NULL;
+
+       lockdep_assert_held(&mvm->mutex);
 
        iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
        switch (vif->type) {
        case NL80211_IFTYPE_ADHOC:
-               goto out_unlock;
+               goto out;
        case NL80211_IFTYPE_MONITOR:
                mvmvif->monitor_active = false;
-               iwl_mvm_update_quotas(mvm, NULL);
                break;
        case NL80211_IFTYPE_AP:
                /* This part is triggered only during CSA */
                if (!vif->csa_active || !mvmvif->ap_ibss_active)
-                       goto out_unlock;
+                       goto out;
+
+               /* Set CS bit on all the stations */
+               iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
+
+               /* Save blocked iface, the timeout is set on the next beacon */
+               rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
 
                mvmvif->ap_ibss_active = false;
-               iwl_mvm_update_quotas(mvm, NULL);
-               /*TODO: bt_coex notification here? */
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (!switching_chanctx)
+                       break;
+
+               disabled_vif = vif;
+
+               iwl_mvm_mac_ctxt_changed(mvm, vif, true);
+               break;
        default:
                break;
        }
 
+       iwl_mvm_update_quotas(mvm, disabled_vif);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
-out_unlock:
+out:
        mvmvif->phy_ctxt = NULL;
        iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct ieee80211_chanctx_conf *ctx)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mutex_lock(&mvm->mutex);
+       __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
        mutex_unlock(&mvm->mutex);
 }
 
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif_chanctx_switch *vifs,
+                                     int n_vifs,
+                                     enum ieee80211_chanctx_switch_mode mode)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       /* we only support SWAP_CONTEXTS and with a single-vif right now */
+       if (mode != CHANCTX_SWMODE_SWAP_CONTEXTS || n_vifs > 1)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&mvm->mutex);
+       __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+       __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
+
+       ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
+       if (ret) {
+               IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
+               goto out_reassign;
+       }
+
+       ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
+                                          true);
+       if (ret) {
+               IWL_ERR(mvm,
+                       "failed to assign new_ctx during channel switch\n");
+               goto out_remove;
+       }
+
+       goto out;
+
+out_remove:
+       __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
+
+out_reassign:
+       ret = __iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx);
+       if (ret) {
+               IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
+               goto out_restart;
+       }
+
+       ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+                                          true);
+       if (ret) {
+               IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+               goto out_restart;
+       }
+
+       goto out;
+
+out_restart:
+       /* things keep failing, better restart the hw */
+       iwl_mvm_nic_restart(mvm, false);
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
                           struct ieee80211_sta *sta,
                           bool set)
@@ -2402,15 +2778,19 @@ static void iwl_mvm_channel_switch_beacon(struct ieee80211_hw *hw,
                                          struct cfg80211_chan_def *chandef)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct ieee80211_vif *csa_vif;
 
        mutex_lock(&mvm->mutex);
-       if (WARN(mvm->csa_vif && mvm->csa_vif->csa_active,
+
+       csa_vif = rcu_dereference_protected(mvm->csa_vif,
+                                           lockdep_is_held(&mvm->mutex));
+       if (WARN(csa_vif && csa_vif->csa_active,
                 "Another CSA is already in progress"))
                goto out_unlock;
 
        IWL_DEBUG_MAC80211(mvm, "CSA started to freq %d\n",
                           chandef->center_freq1);
-       mvm->csa_vif = vif;
+       rcu_assign_pointer(mvm->csa_vif, vif);
 
 out_unlock:
        mutex_unlock(&mvm->mutex);
@@ -2467,6 +2847,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
        .sta_rc_update = iwl_mvm_sta_rc_update,
        .conf_tx = iwl_mvm_mac_conf_tx,
        .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+       .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
        .flush = iwl_mvm_mac_flush,
        .sched_scan_start = iwl_mvm_mac_sched_scan_start,
        .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
@@ -2479,6 +2860,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
        .change_chanctx = iwl_mvm_change_chanctx,
        .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
        .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+       .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
 
        .start_ap = iwl_mvm_start_ap_ibss,
        .stop_ap = iwl_mvm_stop_ap_ibss,
index fcc6c29482d0ef516bba48459b09230b9ead4007..785e5232c757f4e4647f723fab5729bd141368eb 100644 (file)
 #define IWL_RSSI_OFFSET 50
 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
 
+/*
+ * The CSA NoA is scheduled IWL_MVM_CHANNEL_SWITCH_TIME TUs before "beacon 0"
+ * TBTT. This value should be big enough to ensure that we switch in time.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME 40
+
+/*
+ * This value (in TUs) is used to fine tune the CSA NoA end time which should
+ * be just before "beacon 0" TBTT.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
+
+/*
+ * Number of beacons to transmit on a new channel until we unblock tx to
+ * the stations, even if we didn't identify them on a new channel
+ */
+#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
+
 enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BK = 0,
        IWL_MVM_TX_FIFO_BE,
@@ -230,11 +248,21 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_USER,
        IWL_MVM_REF_TX,
        IWL_MVM_REF_TX_AGG,
+       IWL_MVM_REF_ADD_IF,
        IWL_MVM_REF_EXIT_WORK,
 
        IWL_MVM_REF_COUNT,
 };
 
+enum iwl_bt_force_ant_mode {
+       BT_FORCE_ANT_DIS = 0,
+       BT_FORCE_ANT_AUTO,
+       BT_FORCE_ANT_BT,
+       BT_FORCE_ANT_WIFI,
+
+       BT_FORCE_ANT_MAX,
+};
+
 /**
 * struct iwl_mvm_vif_bf_data - beacon filtering related data
 * @bf_enabled: indicates if beacon filtering is enabled
@@ -523,7 +551,7 @@ struct iwl_mvm {
 
        /* Scan status, cmd (pre-allocated) and auxiliary station */
        enum iwl_scan_status scan_status;
-       struct iwl_scan_cmd *scan_cmd;
+       void *scan_cmd;
        struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 
        /* rx chain antennas set through debugfs for the scan command */
@@ -586,10 +614,6 @@ struct iwl_mvm {
        /* -1 for always, 0 for never, >0 for that many times */
        s8 restart_fw;
        void *fw_error_dump;
-       void *fw_error_sram;
-       u32 fw_error_sram_len;
-       u32 *fw_error_rxf;
-       u32 fw_error_rxf_len;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -624,11 +648,16 @@ struct iwl_mvm {
 
        /* BT-Coex */
        u8 bt_kill_msk;
+
+       struct iwl_bt_coex_profile_notif_old last_bt_notif_old;
+       struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old;
        struct iwl_bt_coex_profile_notif last_bt_notif;
        struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+
        u32 last_ant_isol;
        u8 last_corun_lut;
        u8 bt_tx_prio;
+       enum iwl_bt_force_ant_mode bt_force_ant_mode;
 
        /* Thermal Throttling and CTkill */
        struct iwl_mvm_tt_mgmt thermal_throttle;
@@ -647,7 +676,12 @@ struct iwl_mvm {
        /* Indicate if device power save is allowed */
        bool ps_disabled;
 
-       struct ieee80211_vif *csa_vif;
+       struct ieee80211_vif __rcu *csa_vif;
+       struct ieee80211_vif __rcu *csa_tx_blocked_vif;
+       u8 csa_tx_block_bcn_timeout;
+
+       /* system time of last beacon (for AP/GO interface) */
+       u32 ap_last_beacon_gp2;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -719,11 +753,6 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
                               struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
-void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
-void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
-#endif
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 
@@ -809,6 +838,7 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
                          struct iwl_mvm_phy_ctxt *ctxt);
 void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
                            struct iwl_mvm_phy_ctxt *ctxt);
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
 
 /* MAC (virtual interface) programming */
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -835,7 +865,8 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 
 /* Quota management */
-int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif);
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+                         struct ieee80211_vif *disabled_vif);
 
 /* Scanning */
 int iwl_mvm_scan_request(struct iwl_mvm *mvm,
@@ -854,15 +885,24 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
 int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                              struct ieee80211_vif *vif,
                              struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_sched_scan_ies *ies);
+                             struct ieee80211_scan_ies *ies);
 int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct cfg80211_sched_scan_request *req);
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
+int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
+int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb,
+                                   struct iwl_device_cmd *cmd);
+
+/* Unified scan */
+int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_scan_request *req);
+int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   struct cfg80211_sched_scan_request *req,
+                                   struct ieee80211_scan_ies *ies);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -963,11 +1003,30 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
                                    enum ieee80211_band band);
 u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                           struct ieee80211_tx_info *info, u8 ac);
 
+bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
+void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
+int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
+int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb,
+                                struct iwl_device_cmd *cmd);
+void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                              enum ieee80211_rssi_event rssi_event);
+u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
+                                   struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
+                                        struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
+                                       enum ieee80211_band band);
+int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                     struct iwl_rx_cmd_buffer *rxb,
+                                     struct iwl_device_cmd *cmd);
+
 enum iwl_bt_kill_msk {
        BT_KILL_MSK_DEFAULT,
        BT_KILL_MSK_SCO_HID_A2DP,
@@ -1039,4 +1098,9 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                      bool added_vif);
 
+/* TDLS */
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
+
 #endif /* __IWL_MVM_H__ */
index 808f78f6fbf9fe478553bd54264f784d98c01597..b04805ccb443d1a7b1532a96881a3d739933fd96 100644 (file)
@@ -69,7 +69,9 @@
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 7000
+#define IWL_MAX_NVM_SECTION_SIZE       0x1b58
+#define IWL_MAX_NVM_8000A_SECTION_SIZE 0xffc
+#define IWL_MAX_NVM_8000B_SECTION_SIZE 0x1ffc
 
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
@@ -219,7 +221,7 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
  * without overflowing, so no check is needed.
  */
 static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
-                               u8 *data)
+                               u8 *data, u32 size_read)
 {
        u16 length, offset = 0;
        int ret;
@@ -231,6 +233,13 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
 
        /* Read the NVM until exhausted (reading less than requested) */
        while (ret == length) {
+               /* Check no memory assumptions fail and cause an overflow */
+               if ((size_read + offset + length) >
+                   mvm->cfg->base_params->eeprom_size) {
+                       IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
+                       return -ENOBUFS;
+               }
+
                ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
                if (ret < 0) {
                        IWL_DEBUG_EEPROM(mvm->trans->dev,
@@ -326,6 +335,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                u8 data[];
        } *file_sec;
        const u8 *eof, *temp;
+       int max_section_size;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
@@ -334,6 +344,14 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 
        IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
+       /* Maximal size depends on HW family and step */
+       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               max_section_size = IWL_MAX_NVM_SECTION_SIZE;
+       else if ((mvm->trans->hw_rev & 0xc) == 0) /* Family 8000 A-step */
+               max_section_size = IWL_MAX_NVM_8000A_SECTION_SIZE;
+       else /* Family 8000 B-step */
+               max_section_size = IWL_MAX_NVM_8000B_SECTION_SIZE;
+
        /*
         * Obtain NVM image via request_firmware. Since we already used
         * request_firmware_nowait() for the firmware binary load and only
@@ -392,7 +410,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                                                le16_to_cpu(file_sec->word1));
                }
 
-               if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
+               if (section_size > max_section_size) {
                        IWL_ERR(mvm, "ERROR - section too large (%d)\n",
                                section_size);
                        ret = -EINVAL;
@@ -459,6 +477,7 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
 int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
 {
        int ret, section;
+       u32 size_read = 0;
        u8 *nvm_buffer, *temp;
 
        if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
@@ -475,9 +494,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
                        return -ENOMEM;
                for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
                        /* we override the constness for initial read */
-                       ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+                       ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
+                                                  size_read);
                        if (ret < 0)
                                continue;
+                       size_read += ret;
                        temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
                        if (!temp) {
                                ret = -ENOMEM;
@@ -509,6 +530,8 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
                        }
 #endif
                }
+               if (!size_read)
+                       IWL_ERR(mvm, "OTP is blank\n");
                kfree(nvm_buffer);
        }
 
index cc2f7de396deb396d2b20e261b4c2877137dfa3e..7d7b2fbe7cd1b3906c2378059b99776ccaddf65a 100644 (file)
@@ -166,8 +166,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
        WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
                 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
 
-       /* silicon bits */
-       reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+       /*
+        * TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
+        * shouldn't be set to any non-zero value. The same is supposed to be
+        * true of the other HW, but unsetting them (such as the 7260) causes
+        * automatic tests to fail on seemingly unrelated errors. Need to
+        * further investigate this, but for now we'll separate cases.
+        */
+       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
 
        iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
@@ -233,7 +240,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
                   iwl_mvm_rx_scan_offload_complete_notif, true),
-       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
                   false),
 
        RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
@@ -284,6 +291,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(SCAN_OFFLOAD_ABORT_CMD),
        CMD(SCAN_OFFLOAD_COMPLETE),
        CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+       CMD(SCAN_ITERATION_COMPLETE),
        CMD(POWER_TABLE_CMD),
        CMD(WEP_KEY),
        CMD(REPLY_RX_PHY_CMD),
@@ -324,6 +332,9 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(REPLY_THERMAL_MNG_BACKOFF),
        CMD(MAC_PM_POWER_TABLE),
        CMD(BT_COEX_CI),
+       CMD(BT_COEX_UPDATE_SW_BOOST),
+       CMD(BT_COEX_UPDATE_CORUN_LUT),
+       CMD(BT_COEX_UPDATE_REDUCED_TXP),
        CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
        CMD(ANTENNA_COUPLING_NOTIFICATION),
 };
@@ -502,9 +513,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                }
        }
 
-       scan_size = sizeof(struct iwl_scan_cmd) +
-               mvm->fw->ucode_capa.max_probe_length +
-               (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+               scan_size = sizeof(struct iwl_scan_req_unified_lmac) +
+                       sizeof(struct iwl_scan_channel_cfg_lmac) *
+                               mvm->fw->ucode_capa.n_scan_channels +
+                       sizeof(struct iwl_scan_probe_req);
+       else
+               scan_size = sizeof(struct iwl_scan_cmd) +
+                       mvm->fw->ucode_capa.max_probe_length +
+                       mvm->fw->ucode_capa.n_scan_channels *
+                               sizeof(struct iwl_scan_channel);
+
        mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
        if (!mvm->scan_cmd)
                goto out_free;
@@ -549,8 +568,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 
        kfree(mvm->scan_cmd);
        vfree(mvm->fw_error_dump);
-       kfree(mvm->fw_error_sram);
-       kfree(mvm->fw_error_rxf);
        kfree(mvm->mcast_filter_cmd);
        mvm->mcast_filter_cmd = NULL;
 
@@ -754,7 +771,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
        module_put(THIS_MODULE);
 }
 
-static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 {
        iwl_abort_notification_waits(&mvm->notif_wait);
 
@@ -811,93 +828,24 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
                reprobe->dev = mvm->trans->dev;
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
-       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
+       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+                  (!fw_error || mvm->restart_fw)) {
                /* don't let the transport/FW power down */
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
-               if (mvm->restart_fw > 0)
+               if (fw_error && mvm->restart_fw > 0)
                        mvm->restart_fw--;
                ieee80211_restart_hw(mvm->hw);
        }
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
-{
-       struct iwl_fw_error_dump_file *dump_file;
-       struct iwl_fw_error_dump_data *dump_data;
-       u32 file_len;
-       u32 trans_len;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (mvm->fw_error_dump)
-               return;
-
-       file_len = mvm->fw_error_sram_len +
-                  mvm->fw_error_rxf_len +
-                  sizeof(*dump_file) +
-                  sizeof(*dump_data) * 2;
-
-       trans_len = iwl_trans_dump_data(mvm->trans, NULL, 0);
-       if (trans_len)
-               file_len += trans_len;
-
-       dump_file = vmalloc(file_len);
-       if (!dump_file)
-               return;
-
-       mvm->fw_error_dump = dump_file;
-
-       dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
-       dump_file->file_len = cpu_to_le32(file_len);
-       dump_data = (void *)dump_file->data;
-       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
-       dump_data->len = cpu_to_le32(mvm->fw_error_rxf_len);
-       memcpy(dump_data->data, mvm->fw_error_rxf, mvm->fw_error_rxf_len);
-
-       dump_data = iwl_mvm_fw_error_next_data(dump_data);
-       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_SRAM);
-       dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
-
-       /*
-        * No need for lock since at the stage the FW isn't loaded. So it
-        * can't assert - we are the only one who can possibly be accessing
-        * mvm->fw_error_sram right now.
-        */
-       memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
-
-       kfree(mvm->fw_error_rxf);
-       mvm->fw_error_rxf = NULL;
-       mvm->fw_error_rxf_len = 0;
-
-       kfree(mvm->fw_error_sram);
-       mvm->fw_error_sram = NULL;
-       mvm->fw_error_sram_len = 0;
-
-       if (trans_len) {
-               void *buf = iwl_mvm_fw_error_next_data(dump_data);
-               u32 real_trans_len = iwl_trans_dump_data(mvm->trans, buf,
-                                                        trans_len);
-               dump_data = (void *)((u8 *)buf + real_trans_len);
-               dump_file->file_len =
-                       cpu_to_le32(file_len - trans_len + real_trans_len);
-       }
-}
-#endif
-
 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
        iwl_mvm_dump_nic_error_log(mvm);
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       iwl_mvm_fw_error_sram_dump(mvm);
-       iwl_mvm_fw_error_rxf_dump(mvm);
-#endif
-
-       iwl_mvm_nic_restart(mvm);
+       iwl_mvm_nic_restart(mvm, true);
 }
 
 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
@@ -905,7 +853,7 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
        WARN_ON(1);
-       iwl_mvm_nic_restart(mvm);
+       iwl_mvm_nic_restart(mvm, true);
 }
 
 struct iwl_d0i3_iter_data {
index 539f3a942d437565ab6ba9accd06f71874985af4..6cc243f7cf602b8f926baa11d4a59c30db965e0c 100644 (file)
@@ -261,3 +261,29 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
 
        ctxt->ref--;
 }
+
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+                                    struct ieee80211_vif *vif)
+{
+       unsigned long *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (!mvmvif->phy_ctxt)
+               return;
+
+       if (vif->type == NL80211_IFTYPE_STATION ||
+           vif->type == NL80211_IFTYPE_AP)
+               __set_bit(mvmvif->phy_ctxt->id, data);
+}
+
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
+{
+       unsigned long phy_ctxt_counter = 0;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_binding_iterator,
+                                                  &phy_ctxt_counter);
+
+       return hweight8(phy_ctxt_counter);
+}
index c182a8baf685857d3c2857443d53ae978e8646a8..2b2d10800a55e1b90f3f4da4c91bd6cfe09629a8 100644 (file)
@@ -246,30 +246,10 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
-static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
-                                     struct ieee80211_vif *vif)
-{
-       unsigned long *data = _data;
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       if (!mvmvif->phy_ctxt)
-               return;
-
-       if (vif->type == NL80211_IFTYPE_STATION ||
-           vif->type == NL80211_IFTYPE_AP)
-               __set_bit(mvmvif->phy_ctxt->id, data);
-}
-
 static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       unsigned long phy_ctxt_counter = 0;
-
-       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-                                                  IEEE80211_IFACE_ITER_NORMAL,
-                                                  iwl_mvm_binding_iterator,
-                                                  &phy_ctxt_counter);
 
        if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
                    ETH_ALEN))
@@ -291,7 +271,7 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
         * Avoid using uAPSD if client is in DCM -
         * low latency issue in Miracast
         */
-       if (hweight8(phy_ctxt_counter) >= 2)
+       if (iwl_mvm_phy_ctx_count(mvm) >= 2)
                return false;
 
        return true;
@@ -503,6 +483,7 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
 }
 
 struct iwl_power_vifs {
+       struct iwl_mvm *mvm;
        struct ieee80211_vif *bf_vif;
        struct ieee80211_vif *bss_vif;
        struct ieee80211_vif *p2p_vif;
@@ -512,6 +493,8 @@ struct iwl_power_vifs {
        bool bss_active;
        bool ap_active;
        bool monitor_active;
+       bool bss_tdls;
+       bool p2p_tdls;
 };
 
 static void iwl_mvm_power_iterator(void *_data, u8 *mac,
@@ -548,6 +531,8 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
                /* only a single MAC of the same type */
                WARN_ON(power_iterator->p2p_vif);
                power_iterator->p2p_vif = vif;
+               power_iterator->p2p_tdls =
+                       !!iwl_mvm_tdls_sta_count(power_iterator->mvm, vif);
                if (mvmvif->phy_ctxt)
                        if (mvmvif->phy_ctxt->id < MAX_PHYS)
                                power_iterator->p2p_active = true;
@@ -557,6 +542,8 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
                /* only a single MAC of the same type */
                WARN_ON(power_iterator->bss_vif);
                power_iterator->bss_vif = vif;
+               power_iterator->bss_tdls =
+                       !!iwl_mvm_tdls_sta_count(power_iterator->mvm, vif);
                if (mvmvif->phy_ctxt)
                        if (mvmvif->phy_ctxt->id < MAX_PHYS)
                                power_iterator->bss_active = true;
@@ -599,13 +586,15 @@ iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
                ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
 
        /* enable PM on bss if bss stand alone */
-       if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+       if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active &&
+           !vifs->bss_tdls) {
                bss_mvmvif->pm_enabled = true;
                return;
        }
 
        /* enable PM on p2p if p2p stand alone */
-       if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+       if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active &&
+           !vifs->p2p_tdls) {
                if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PM)
                        p2p_mvmvif->pm_enabled = true;
                return;
@@ -831,7 +820,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
 {
        struct iwl_mvm_vif *mvmvif;
-       struct iwl_power_vifs vifs = {};
+       struct iwl_power_vifs vifs = {
+               .mvm = mvm,
+       };
        bool ba_enable;
        int ret;
 
index ba68d7b8450508d9c7b123500b654d2195b5613a..4e20b3ce2b6a320e7fd163fe34a02674788645fc 100644 (file)
@@ -73,7 +73,7 @@ struct iwl_mvm_quota_iterator_data {
        int colors[MAX_BINDINGS];
        int low_latency[MAX_BINDINGS];
        int n_low_latency_bindings;
-       struct ieee80211_vif *new_vif;
+       struct ieee80211_vif *disabled_vif;
 };
 
 static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
@@ -83,13 +83,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        u16 id;
 
-       /*
-        * We'll account for the new interface (if any) below,
-        * skip it here in case we're not called from within
-        * the add_interface callback (otherwise it won't show
-        * up in iteration)
-        */
-       if (vif == data->new_vif)
+       /* skip disabled interfaces here immediately */
+       if (vif == data->disabled_vif)
                return;
 
        if (!mvmvif->phy_ctxt)
@@ -104,11 +99,6 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
        if (WARN_ON_ONCE(id >= MAX_BINDINGS))
                return;
 
-       if (data->colors[id] < 0)
-               data->colors[id] = mvmvif->phy_ctxt->color;
-       else
-               WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
-
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                if (vif->bss_conf.assoc)
@@ -130,6 +120,11 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
                return;
        }
 
+       if (data->colors[id] < 0)
+               data->colors[id] = mvmvif->phy_ctxt->color;
+       else
+               WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
        data->n_interfaces[id]++;
 
        if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
@@ -171,14 +166,15 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
 #endif
 }
 
-int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+                         struct ieee80211_vif *disabled_vif)
 {
        struct iwl_time_quota_cmd cmd = {};
        int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
        struct iwl_mvm_quota_iterator_data data = {
                .n_interfaces = {},
                .colors = { -1, -1, -1, -1 },
-               .new_vif = newvif,
+               .disabled_vif = disabled_vif,
        };
 
        lockdep_assert_held(&mvm->mutex);
@@ -193,10 +189,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_quota_iterator, &data);
-       if (newvif) {
-               data.new_vif = NULL;
-               iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
-       }
 
        /*
         * The FW's scheduling session consists of
@@ -285,6 +277,14 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 
        iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
 
+       /* check that we have non-zero quota for all valid bindings */
+       for (i = 0; i < MAX_BINDINGS; i++) {
+               if (cmd.quotas[i].id_and_color == cpu_to_le32(FW_CTXT_INVALID))
+                       continue;
+               WARN_ONCE(cmd.quotas[i].quota == 0,
+                         "zero quota on binding %d\n", i);
+       }
+
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
                                   sizeof(cmd), &cmd);
        if (ret)
index 306a6caa486889b4dc27ea53fc350f246a7433f8..c70e959bf0e3d443b17ca8e055ee3dd57fda30a2 100644 (file)
@@ -927,7 +927,7 @@ static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
        u8 low;
        u16 high_low;
        u16 rate_mask;
-       struct iwl_mvm *mvm = lq_sta->drv;
+       struct iwl_mvm *mvm = lq_sta->pers.drv;
 
        rate_mask = rs_get_supported_rates(lq_sta, rate);
        high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
@@ -946,7 +946,7 @@ static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
 static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                                          struct rs_rate *rate)
 {
-       struct iwl_mvm *mvm = lq_sta->drv;
+       struct iwl_mvm *mvm = lq_sta->pers.drv;
 
        if (is_legacy(rate)) {
                /* No column to downgrade from Legacy */
@@ -1026,14 +1026,14 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
        if (!lq_sta) {
                IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
                return;
-       } else if (!lq_sta->drv) {
+       } else if (!lq_sta->pers.drv) {
                IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
                return;
        }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        /* Disable last tx check if we are debugging with fixed rate */
-       if (lq_sta->dbg_fixed_rate) {
+       if (lq_sta->pers.dbg_fixed_rate) {
                IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
                return;
        }
@@ -1405,7 +1405,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
        int flush_interval_passed = 0;
        struct iwl_mvm *mvm;
 
-       mvm = lq_sta->drv;
+       mvm = lq_sta->pers.drv;
        active_tbl = lq_sta->active_tbl;
 
        tbl = &(lq_sta->lq_info[active_tbl]);
@@ -1865,11 +1865,11 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
        int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-       if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+       if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
                IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
-                              lq_sta->dbg_fixed_txp_reduction);
-               lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
-               return cur != lq_sta->dbg_fixed_txp_reduction;
+                              lq_sta->pers.dbg_fixed_txp_reduction);
+               lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
+               return cur != lq_sta->pers.dbg_fixed_txp_reduction;
        }
 #endif
 
@@ -2382,7 +2382,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        }
 
        /* Treat uninitialized rate scaling data same as non-existing. */
-       if (lq_sta && !lq_sta->drv) {
+       if (lq_sta && !lq_sta->pers.drv) {
                IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
                mvm_sta = NULL;
        }
@@ -2401,12 +2401,18 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
                          gfp_t gfp)
 {
        struct iwl_mvm_sta *sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
-       struct iwl_op_mode *op_mode __maybe_unused =
-                       (struct iwl_op_mode *)mvm_rate;
-       struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+       struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
+       struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
+       struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta;
 
        IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
+       lq_sta->pers.drv = mvm;
+#ifdef CONFIG_MAC80211_DEBUGFS
+       lq_sta->pers.dbg_fixed_rate = 0;
+       lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+#endif
+
        return &sta_priv->lq_sta;
 }
 
@@ -2552,7 +2558,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        sta_priv = (struct iwl_mvm_sta *)sta->drv_priv;
        lq_sta = &sta_priv->lq_sta;
-       memset(lq_sta, 0, sizeof(*lq_sta));
+
+       /* clear all non-persistent lq data */
+       memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
 
        sband = hw->wiphy->bands[band];
 
@@ -2630,17 +2638,12 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        /* as default allow aggregation for all tids */
        lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
-       lq_sta->drv = mvm;
 
        /* Set last_txrate_idx to lowest rate */
        lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
        if (sband->band == IEEE80211_BAND_5GHZ)
                lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
        lq_sta->is_agg = 0;
-#ifdef CONFIG_MAC80211_DEBUGFS
-       lq_sta->dbg_fixed_rate = 0;
-       lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
-#endif
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
 #endif
@@ -2811,12 +2814,12 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
        u8 ant = initial_rate->ant;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-       if (lq_sta->dbg_fixed_rate) {
+       if (lq_sta->pers.dbg_fixed_rate) {
                rs_build_rates_table_from_fixed(mvm, lq_cmd,
                                                lq_sta->band,
-                                               lq_sta->dbg_fixed_rate);
+                                               lq_sta->pers.dbg_fixed_rate);
                lq_cmd->reduced_tpc = 0;
-               ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+               ant = (lq_sta->pers.dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
                        RATE_MCS_ANT_POS;
        } else
 #endif
@@ -2926,14 +2929,14 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
        lq_sta->active_mimo2_rate  = 0x1FD0;    /* 6 - 60 MBits, no 9, no CCK */
 
        IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
-                      lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+                      lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
 
-       if (lq_sta->dbg_fixed_rate) {
+       if (lq_sta->pers.dbg_fixed_rate) {
                struct rs_rate rate;
-               rs_rate_from_ucode_rate(lq_sta->dbg_fixed_rate,
+               rs_rate_from_ucode_rate(lq_sta->pers.dbg_fixed_rate,
                                        lq_sta->band, &rate);
                rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate);
-               iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, false);
+               iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
        }
 }
 
@@ -2946,16 +2949,16 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
        size_t buf_size;
        u32 parsed_rate;
 
-       mvm = lq_sta->drv;
+       mvm = lq_sta->pers.drv;
        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) -  1);
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
 
        if (sscanf(buf, "%x", &parsed_rate) == 1)
-               lq_sta->dbg_fixed_rate = parsed_rate;
+               lq_sta->pers.dbg_fixed_rate = parsed_rate;
        else
-               lq_sta->dbg_fixed_rate = 0;
+               lq_sta->pers.dbg_fixed_rate = 0;
 
        rs_program_fix_rate(mvm, lq_sta);
 
@@ -2974,7 +2977,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        struct iwl_mvm *mvm;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct rs_rate *rate = &tbl->rate;
-       mvm = lq_sta->drv;
+       mvm = lq_sta->pers.drv;
        buff = kmalloc(2048, GFP_KERNEL);
        if (!buff)
                return -ENOMEM;
@@ -2984,7 +2987,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
                        lq_sta->total_failed, lq_sta->total_success,
                        lq_sta->active_legacy_rate);
        desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-                       lq_sta->dbg_fixed_rate);
+                       lq_sta->pers.dbg_fixed_rate);
        desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
            (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
            (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
@@ -3182,31 +3185,20 @@ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
 static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
 {
        struct iwl_lq_sta *lq_sta = mvm_sta;
-       lq_sta->rs_sta_dbgfs_scale_table_file =
-               debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
-                                   lq_sta, &rs_sta_dbgfs_scale_table_ops);
-       lq_sta->rs_sta_dbgfs_stats_table_file =
-               debugfs_create_file("rate_stats_table", S_IRUSR, dir,
-                                   lq_sta, &rs_sta_dbgfs_stats_table_ops);
-       lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
-               debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
-                                   lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
-       lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
-               debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
-                                 &lq_sta->tx_agg_tid_en);
-       lq_sta->rs_sta_dbgfs_reduced_txp_file =
-               debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
-                                 &lq_sta->dbg_fixed_txp_reduction);
+       debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+                           lq_sta, &rs_sta_dbgfs_scale_table_ops);
+       debugfs_create_file("rate_stats_table", S_IRUSR, dir,
+                           lq_sta, &rs_sta_dbgfs_stats_table_ops);
+       debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+                           lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
+       debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+                         &lq_sta->tx_agg_tid_en);
+       debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+                         &lq_sta->pers.dbg_fixed_txp_reduction);
 }
 
 static void rs_remove_debugfs(void *mvm, void *mvm_sta)
 {
-       struct iwl_lq_sta *lq_sta = mvm_sta;
-       debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-       debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
 }
 #endif
 
index 374a83d7db25a98dd76da34d3fdff9557e48664f..f27b9d687a25e9e1854c6e6a9a04331e8994717e 100644 (file)
@@ -349,16 +349,6 @@ struct iwl_lq_sta {
        struct iwl_lq_cmd lq;
        struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
        u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct dentry *rs_sta_dbgfs_scale_table_file;
-       struct dentry *rs_sta_dbgfs_stats_table_file;
-       struct dentry *rs_sta_dbgfs_drv_tx_stats_file;
-       struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
-       struct dentry *rs_sta_dbgfs_reduced_txp_file;
-       u32 dbg_fixed_rate;
-       u8 dbg_fixed_txp_reduction;
-#endif
-       struct iwl_mvm *drv;
 
        /* used to be in sta_info */
        int last_txrate_idx;
@@ -369,6 +359,15 @@ struct iwl_lq_sta {
 
        /* tx power reduce for this sta */
        int tpc_reduce;
+
+       /* persistent fields - initialized only once - keep last! */
+       struct {
+#ifdef CONFIG_MAC80211_DEBUGFS
+               u32 dbg_fixed_rate;
+               u8 dbg_fixed_txp_reduction;
+#endif
+               struct iwl_mvm *drv;
+       } pers;
 };
 
 /* Initialize station's rate scaling information after adding station */
index cf7276967acdec6439392c82e44e94de52d453c8..4b98987fc4133f6b7d7c0a21a1d11e57658a35f7 100644 (file)
@@ -258,6 +258,23 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 
        memset(&rx_status, 0, sizeof(rx_status));
 
+       /*
+        * We have tx blocked stations (with CS bit). If we heard frames from
+        * a blocked station on a new channel we can TX to it again.
+        */
+       if (unlikely(mvm->csa_tx_block_bcn_timeout)) {
+               struct ieee80211_sta *sta;
+
+               rcu_read_lock();
+
+               sta = ieee80211_find_sta(
+                       rcu_dereference(mvm->csa_tx_blocked_vif), hdr->addr2);
+               if (sta)
+                       iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false);
+
+               rcu_read_unlock();
+       }
+
        /*
         * drop the packet if it has failed being decrypted by HW
         */
index eac2b424f6a06447a79ba20e9447d4d32fbe5cd6..004b1f5d031429a2798cc1d35464daa6ae59db00 100644 (file)
@@ -97,10 +97,9 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
        return cpu_to_le16(rx_chain);
 }
 
-static inline __le32
-iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
+static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
 {
-       if (req->channels[0]->band == IEEE80211_BAND_2GHZ)
+       if (band == IEEE80211_BAND_2GHZ)
                return cpu_to_le32(PHY_BAND_24);
        else
                return cpu_to_le32(PHY_BAND_5);
@@ -130,19 +129,19 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
  * request list, is not copied here, but inserted directly to the probe
  * request.
  */
-static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
-                                   struct cfg80211_scan_request *req,
-                                   int first)
+static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
+                                   struct cfg80211_ssid *ssids,
+                                   int n_ssids, int first)
 {
        int fw_idx, req_idx;
 
-       for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
+       for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
             req_idx--, fw_idx++) {
-               cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
-               cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
-               memcpy(cmd->direct_scan[fw_idx].ssid,
-                      req->ssids[req_idx].ssid,
-                      req->ssids[req_idx].ssid_len);
+               cmd_ssid[fw_idx].id = WLAN_EID_SSID;
+               cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
+               memcpy(cmd_ssid[fw_idx].ssid,
+                      ssids[req_idx].ssid,
+                      ssids[req_idx].ssid_len);
        }
 }
 
@@ -204,7 +203,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
  */
 static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
                                  int n_ssids, const u8 *ssid, int ssid_len,
-                                 const u8 *ie, int ie_len,
+                                 const u8 *band_ie, int band_ie_len,
+                                 const u8 *common_ie, int common_ie_len,
                                  int left)
 {
        int len = 0;
@@ -244,12 +244,19 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
 
        len += ssid_len + 2;
 
-       if (WARN_ON(left < ie_len))
+       if (WARN_ON(left < band_ie_len + common_ie_len))
                return len;
 
-       if (ie && ie_len) {
-               memcpy(pos, ie, ie_len);
-               len += ie_len;
+       if (band_ie && band_ie_len) {
+               memcpy(pos, band_ie, band_ie_len);
+               pos += band_ie_len;
+               len += band_ie_len;
+       }
+
+       if (common_ie && common_ie_len) {
+               memcpy(pos, common_ie, common_ie_len);
+               pos += common_ie_len;
+               len += common_ie_len;
        }
 
        return (u16)len;
@@ -267,7 +274,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 
 static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                     struct ieee80211_vif *vif,
-                                    int n_ssids,
+                                    int n_ssids, u32 flags,
                                     struct iwl_mvm_scan_params *params)
 {
        bool global_bound = false;
@@ -289,6 +296,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                params->max_out_time = 250;
        }
 
+       if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+               params->max_out_time = 200;
+
 not_bound:
 
        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
@@ -325,22 +335,20 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
 
        IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
        mvm->scan_status = IWL_MVM_SCAN_OS;
-       memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
-              mvm->fw->ucode_capa.max_probe_length +
-              (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
+       memset(cmd, 0, ksize(cmd));
 
        cmd->channel_count = (u8)req->n_channels;
        cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
        cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
        cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
 
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, &params);
        cmd->max_out_time = cpu_to_le32(params.max_out_time);
        cmd->suspend_time = cpu_to_le32(params.suspend_time);
        if (params.passive_fragmented)
                cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
 
-       cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
+       cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
        cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
                                        MAC_FILTER_IN_BEACON);
 
@@ -367,7 +375,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
        }
 
-       iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
+       iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->ssids, req->n_ssids,
+                               basic_ssid ? 1 : 0);
 
        cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
                                           TX_CMD_FLG_BT_DIS);
@@ -382,7 +391,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                            (struct ieee80211_mgmt *)cmd->data,
                            vif->addr,
                            req->n_ssids, ssid, ssid_len,
-                           req->ie, req->ie_len,
+                           req->ie, req->ie_len, NULL, 0,
                            mvm->fw->ucode_capa.max_probe_length));
 
        iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
@@ -441,16 +450,27 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        return 0;
 }
 
-int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb,
+                                   struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_sched_scan_results *notif = (void *)pkt->data;
+       u8 client_bitmap = 0;
 
-       if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
-               ieee80211_sched_scan_results(mvm->hw);
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)) {
+               struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+               client_bitmap = notif->client_bitmap;
+       }
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN ||
+           client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+               if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
+                       IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+                       ieee80211_sched_scan_results(mvm->hw);
+               } else {
+                       IWL_DEBUG_SCAN(mvm, "Scan results\n");
+               }
        }
 
        return 0;
@@ -494,7 +514,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
        };
 }
 
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_scan_abort;
        static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
@@ -535,33 +555,52 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
                                           struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+       u8 status, ebs_status;
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) {
+               struct iwl_periodic_scan_complete *scan_notif;
+
+               scan_notif = (void *)pkt->data;
+               status = scan_notif->status;
+               ebs_status = scan_notif->ebs_status;
+       } else  {
+               struct iwl_scan_offload_complete *scan_notif;
 
+               scan_notif = (void *)pkt->data;
+               status = scan_notif->status;
+               ebs_status = scan_notif->ebs_status;
+       }
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
        IWL_DEBUG_SCAN(mvm,
-                      "Scheduled scan completed, status %s EBS status %s:%d\n",
-                      scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
-                      "completed" : "aborted", scan_notif->ebs_status ==
-                      IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
-                      scan_notif->ebs_status);
+                      "%s completed, status %s, EBS status %s\n",
+                      mvm->scan_status == IWL_MVM_SCAN_SCHED ?
+                               "Scheduled scan" : "Scan",
+                      status == IWL_SCAN_OFFLOAD_COMPLETED ?
+                               "completed" : "aborted",
+                      ebs_status == IWL_SCAN_EBS_SUCCESS ?
+                               "success" : "failed");
 
 
        /* only call mac80211 completion if the stop was initiated by FW */
        if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
                mvm->scan_status = IWL_MVM_SCAN_NONE;
                ieee80211_sched_scan_stopped(mvm->hw);
+       } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               ieee80211_scan_completed(mvm->hw,
+                                        status == IWL_SCAN_OFFLOAD_ABORTED);
        }
 
-       mvm->last_ebs_successful = !scan_notif->ebs_status;
+       mvm->last_ebs_successful = !ebs_status;
 
        return 0;
 }
 
 static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct ieee80211_sched_scan_ies *ies,
+                                         struct ieee80211_scan_ies *ies,
                                          enum ieee80211_band band,
                                          struct iwl_tx_cmd *cmd,
                                          u8 *data)
@@ -577,7 +616,8 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
        cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
                                         vif->addr,
                                         1, NULL, 0,
-                                        ies->ie[band], ies->len[band],
+                                        ies->ies[band], ies->len[band],
+                                        ies->common_ies, ies->common_ie_len,
                                         SCAN_OFFLOAD_PROBE_REQ_SIZE);
        cmd->len = cpu_to_le16(cmd_len);
 }
@@ -621,8 +661,8 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
 }
 
 static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
-                                       struct iwl_scan_offload_cmd *scan,
-                                       u32 *ssid_bitmap)
+                                       struct iwl_ssid_ie *direct_scan,
+                                       u32 *ssid_bitmap, bool basic_ssid)
 {
        int i, j;
        int index;
@@ -636,10 +676,10 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
                /* skip empty SSID matchsets */
                if (!req->match_sets[i].ssid.ssid_len)
                        continue;
-               scan->direct_scan[i].id = WLAN_EID_SSID;
-               scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
-               memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
-                      scan->direct_scan[i].len);
+               direct_scan[i].id = WLAN_EID_SSID;
+               direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+               memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+                      direct_scan[i].len);
        }
 
        /* add SSIDs from scan SSID list */
@@ -647,14 +687,14 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
        for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
                index = iwl_ssid_exist(req->ssids[j].ssid,
                                       req->ssids[j].ssid_len,
-                                      scan->direct_scan);
+                                      direct_scan);
                if (index < 0) {
-                       if (!req->ssids[j].ssid_len)
+                       if (!req->ssids[j].ssid_len && basic_ssid)
                                continue;
-                       scan->direct_scan[i].id = WLAN_EID_SSID;
-                       scan->direct_scan[i].len = req->ssids[j].ssid_len;
-                       memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
-                              scan->direct_scan[i].len);
+                       direct_scan[i].id = WLAN_EID_SSID;
+                       direct_scan[i].len = req->ssids[j].ssid_len;
+                       memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
+                              direct_scan[i].len);
                        *ssid_bitmap |= BIT(i + 1);
                        i++;
                } else {
@@ -665,12 +705,19 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
 
 static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                                  struct cfg80211_sched_scan_request *req,
-                                 struct iwl_scan_channel_cfg *channels,
+                                 u8 *channels_buffer,
                                  enum ieee80211_band band,
                                  int *head,
                                  u32 ssid_bitmap,
                                  struct iwl_mvm_scan_params *params)
 {
+       u32 n_channels = mvm->fw->ucode_capa.n_scan_channels;
+       __le32 *type = (__le32 *)channels_buffer;
+       __le16 *channel_number = (__le16 *)(type + n_channels);
+       __le16 *iter_count = channel_number + n_channels;
+       __le32 *iter_interval = (__le32 *)(iter_count + n_channels);
+       u8 *active_dwell = (u8 *)(iter_interval + n_channels);
+       u8 *passive_dwell = active_dwell + n_channels;
        int i, index = 0;
 
        for (i = 0; i < req->n_channels; i++) {
@@ -682,34 +729,33 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                index = *head;
                (*head)++;
 
-               channels->channel_number[index] = cpu_to_le16(chan->hw_value);
-               channels->dwell_time[index][0] = params->dwell[band].active;
-               channels->dwell_time[index][1] = params->dwell[band].passive;
+               channel_number[index] = cpu_to_le16(chan->hw_value);
+               active_dwell[index] = params->dwell[band].active;
+               passive_dwell[index] = params->dwell[band].passive;
 
-               channels->iter_count[index] = cpu_to_le16(1);
-               channels->iter_interval[index] = 0;
+               iter_count[index] = cpu_to_le16(1);
+               iter_interval[index] = 0;
 
                if (!(chan->flags & IEEE80211_CHAN_NO_IR))
-                       channels->type[index] |=
+                       type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
 
-               channels->type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
-                                           IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+               type[index] |= cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
+                                          IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
 
                if (chan->flags & IEEE80211_CHAN_NO_HT40)
-                       channels->type[index] |=
+                       type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
 
                /* scan for all SSIDs from req->ssids */
-               channels->type[index] |= cpu_to_le32(ssid_bitmap);
+               type[index] |= cpu_to_le32(ssid_bitmap);
        }
 }
 
 int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                              struct ieee80211_vif *vif,
                              struct cfg80211_sched_scan_request *req,
-                             struct ieee80211_sched_scan_ies *ies)
+                             struct ieee80211_scan_ies *ies)
 {
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -717,6 +763,9 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
+       u8 *probes;
+       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+                           IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
 
        struct iwl_scan_offload_cfg *scan_cfg;
        struct iwl_host_cmd cmd = {
@@ -727,24 +776,29 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+                 mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE +
                  2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
 
        scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
        if (!scan_cfg)
                return -ENOMEM;
 
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+       probes = scan_cfg->data +
+               mvm->fw->ucode_capa.n_scan_channels * IWL_SCAN_CHAN_SIZE;
+
+       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
        iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
        scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
 
-       iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+       iwl_scan_offload_build_ssid(req, scan_cfg->scan_cmd.direct_scan,
+                                   &ssid_bitmap, basic_ssid);
        /* build tx frames for supported bands */
        if (band_2ghz) {
                iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
                                              IEEE80211_BAND_2GHZ,
                                              &scan_cfg->scan_cmd.tx_cmd[0],
-                                             scan_cfg->data);
-               iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+                                             probes);
+               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
                                      IEEE80211_BAND_2GHZ, &head,
                                      ssid_bitmap, &params);
        }
@@ -752,9 +806,9 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
                                              IEEE80211_BAND_5GHZ,
                                              &scan_cfg->scan_cmd.tx_cmd[1],
-                                             scan_cfg->data +
+                                             probes +
                                                SCAN_OFFLOAD_PROBE_REQ_SIZE);
-               iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+               iwl_build_channel_cfg(mvm, req, scan_cfg->data,
                                      IEEE80211_BAND_5GHZ, &head,
                                      ssid_bitmap, &params);
        }
@@ -845,11 +899,11 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                .watchdog = IWL_SCHED_SCAN_WATCHDOG,
 
                .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
-               .schedule_line[0].delay = req->interval / 1000,
+               .schedule_line[0].delay = cpu_to_le16(req->interval / 1000),
                .schedule_line[0].full_scan_mul = 1,
 
                .schedule_line[1].iterations = 0xff,
-               .schedule_line[1].delay = req->interval / 1000,
+               .schedule_line[1].delay = cpu_to_le16(req->interval / 1000),
                .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
        };
 
@@ -872,7 +926,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                                    sizeof(scan_req), &scan_req);
 }
 
-static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
 {
        int ret;
        struct iwl_host_cmd cmd = {
@@ -883,7 +937,9 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
        /* Exit instantly with error when device is not ready
         * to receive scan abort command or it does not perform
         * scheduled scan currently */
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
+            mvm->scan_status != IWL_MVM_SCAN_OS))
                return -EIO;
 
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
@@ -905,16 +961,19 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
+int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
 {
        int ret;
        struct iwl_notification_wait wait_scan_done;
        static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
+       bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
-               IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
+           (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
+            mvm->scan_status != IWL_MVM_SCAN_OS)) {
+               IWL_DEBUG_SCAN(mvm, "No scan to stop\n");
                return 0;
        }
 
@@ -923,14 +982,16 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
                                   ARRAY_SIZE(scan_done_notif),
                                   NULL, NULL);
 
-       ret = iwl_mvm_send_sched_scan_abort(mvm);
+       ret = iwl_mvm_send_scan_offload_abort(mvm);
        if (ret) {
-               IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+               IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
+                              sched ? "offloaded " : "", ret);
                iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
                return ret;
        }
 
-       IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+       IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
+                      sched ? "offloaded " : "");
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
        if (ret)
@@ -943,8 +1004,317 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify)
         */
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 
-       if (notify)
-               ieee80211_sched_scan_stopped(mvm->hw);
+       if (notify) {
+               if (sched)
+                       ieee80211_sched_scan_stopped(mvm->hw);
+               else
+                       ieee80211_scan_completed(mvm->hw, true);
+       }
 
        return 0;
 }
+
+static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+                                            struct iwl_scan_req_tx_cmd *tx_cmd,
+                                            bool no_cck)
+{
+       tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+                                        TX_CMD_FLG_BT_DIS);
+       tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+                                                          IEEE80211_BAND_2GHZ,
+                                                          no_cck);
+       tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+
+       tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+                                        TX_CMD_FLG_BT_DIS);
+       tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+                                                          IEEE80211_BAND_5GHZ,
+                                                          no_cck);
+       tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+}
+
+static void
+iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
+                              struct ieee80211_channel **channels,
+                              int n_channels, u32 ssid_bitmap,
+                              struct iwl_scan_req_unified_lmac *cmd)
+{
+       struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
+       int i;
+
+       for (i = 0; i < n_channels; i++) {
+               channel_cfg[i].channel_num =
+                       cpu_to_le16(channels[i]->hw_value);
+               channel_cfg[i].iter_count = cpu_to_le16(1);
+               channel_cfg[i].iter_interval = 0;
+               channel_cfg[i].flags =
+                       cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
+                                   ssid_bitmap);
+       }
+}
+
+static void
+iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                                struct ieee80211_scan_ies *ies,
+                                struct iwl_scan_req_unified_lmac *cmd)
+{
+       struct iwl_scan_probe_req *preq = (void *)(cmd->data +
+               sizeof(struct iwl_scan_channel_cfg_lmac) *
+                       mvm->fw->ucode_capa.n_scan_channels);
+       struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
+       u8 *pos;
+
+       frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+       eth_broadcast_addr(frame->da);
+       memcpy(frame->sa, vif->addr, ETH_ALEN);
+       eth_broadcast_addr(frame->bssid);
+       frame->seq_ctrl = 0;
+
+       pos = frame->u.probe_req.variable;
+       *pos++ = WLAN_EID_SSID;
+       *pos++ = 0;
+
+       preq->mac_header.offset = 0;
+       preq->mac_header.len = cpu_to_le16(24 + 2);
+
+       memcpy(pos, ies->ies[IEEE80211_BAND_2GHZ],
+              ies->len[IEEE80211_BAND_2GHZ]);
+       preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
+       preq->band_data[0].len = cpu_to_le16(ies->len[IEEE80211_BAND_2GHZ]);
+       pos += ies->len[IEEE80211_BAND_2GHZ];
+
+       memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
+              ies->len[IEEE80211_BAND_5GHZ]);
+       preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
+       preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
+       pos += ies->len[IEEE80211_BAND_5GHZ];
+
+       memcpy(pos, ies->common_ies, ies->common_ie_len);
+       preq->common_data.offset = cpu_to_le16(pos - preq->buf);
+       preq->common_data.len = cpu_to_le16(ies->common_ie_len);
+}
+
+static void
+iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
+                                      struct iwl_scan_req_unified_lmac *cmd,
+                                      struct iwl_mvm_scan_params *params)
+{
+       memset(cmd, 0, ksize(cmd));
+       cmd->active_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].active;
+       cmd->passive_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].passive;
+       /* TODO: Use params; now fragmented isn't used. */
+       cmd->fragmented_dwell = 0;
+       cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+       cmd->max_out_time = cpu_to_le32(params->max_out_time);
+       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+       cmd->iter_num = cpu_to_le32(1);
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+           mvm->last_ebs_successful) {
+               cmd->channel_opt[0].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[1].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+       }
+}
+
+int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_scan_request *req)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = SCAN_OFFLOAD_REQUEST_CMD,
+               .len = { sizeof(struct iwl_scan_req_unified_lmac) +
+                        sizeof(struct iwl_scan_channel_cfg_lmac) *
+                               mvm->fw->ucode_capa.n_scan_channels +
+                        sizeof(struct iwl_scan_probe_req), },
+               .data = { mvm->scan_cmd, },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+       struct iwl_mvm_scan_params params = {};
+       u32 flags;
+       int ssid_bitmap = 0;
+       int ret, i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(mvm->scan_cmd == NULL))
+               return -ENOMEM;
+
+       if (WARN_ON_ONCE(req->req.n_ssids > PROBE_OPTION_MAX ||
+                        req->ies.common_ie_len + req->ies.len[0] +
+                               req->ies.len[1] + 24 + 2 >
+                                       SCAN_OFFLOAD_PROBE_REQ_SIZE ||
+                        req->req.n_channels >
+                               mvm->fw->ucode_capa.n_scan_channels))
+               return -1;
+
+       mvm->scan_status = IWL_MVM_SCAN_OS;
+
+       iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
+                                &params);
+
+       iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
+
+       cmd->n_channels = (u8)req->req.n_channels;
+
+       flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+
+       if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+
+       if (params.passive_fragmented)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
+
+       if (req->req.n_ssids == 0)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+
+       cmd->scan_flags = cpu_to_le32(flags);
+
+       cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
+       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+                                       MAC_FILTER_IN_BEACON);
+       iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
+       iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
+                               req->req.n_ssids, 0);
+
+       cmd->schedule[0].delay = 0;
+       cmd->schedule[0].iterations = 1;
+       cmd->schedule[0].full_scan_mul = 0;
+       cmd->schedule[1].delay = 0;
+       cmd->schedule[1].iterations = 0;
+       cmd->schedule[1].full_scan_mul = 0;
+
+       for (i = 1; i <= req->req.n_ssids; i++)
+               ssid_bitmap |= BIT(i);
+
+       iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
+                                      req->req.n_channels, ssid_bitmap,
+                                      cmd);
+
+       iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &hcmd);
+       if (!ret) {
+               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+       } else {
+               /*
+                * If the scan failed, it usually means that the FW was unable
+                * to allocate the time events. Warn on it, but maybe we
+                * should try to send the command again with different params.
+                */
+               IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               ret = -EIO;
+       }
+       return ret;
+}
+
+int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   struct cfg80211_sched_scan_request *req,
+                                   struct ieee80211_scan_ies *ies)
+{
+       struct iwl_host_cmd hcmd = {
+               .id = SCAN_OFFLOAD_REQUEST_CMD,
+               .len = { sizeof(struct iwl_scan_req_unified_lmac) +
+                        sizeof(struct iwl_scan_channel_cfg_lmac) *
+                               mvm->fw->ucode_capa.n_scan_channels +
+                        sizeof(struct iwl_scan_probe_req), },
+               .data = { mvm->scan_cmd, },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
+       struct iwl_mvm_scan_params params = {};
+       int ret;
+       u32 flags = 0, ssid_bitmap = 0;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(mvm->scan_cmd == NULL))
+               return -ENOMEM;
+
+       if (WARN_ON_ONCE(req->n_ssids > PROBE_OPTION_MAX ||
+                        ies->common_ie_len + ies->len[0] + ies->len[1] + 24 + 2
+                               > SCAN_OFFLOAD_PROBE_REQ_SIZE ||
+                        req->n_channels > mvm->fw->ucode_capa.n_scan_channels))
+               return -ENOBUFS;
+
+       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
+
+       iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
+
+       cmd->n_channels = (u8)req->n_channels;
+
+       if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+               IWL_DEBUG_SCAN(mvm,
+                              "Sending scheduled scan with filtering, n_match_sets %d\n",
+                              req->n_match_sets);
+       } else {
+               IWL_DEBUG_SCAN(mvm,
+                              "Sending Scheduled scan without filtering\n");
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       }
+
+       if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+
+       if (params.passive_fragmented)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
+
+       if (req->n_ssids == 0)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+
+       cmd->scan_flags = cpu_to_le32(flags);
+
+       cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
+       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+                                       MAC_FILTER_IN_BEACON);
+       iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
+       iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);
+
+       cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
+       cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
+       cmd->schedule[0].full_scan_mul = 1;
+
+       cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
+       cmd->schedule[1].iterations = 0xff;
+       cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+
+       iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
+                                      ssid_bitmap, cmd);
+
+       iwl_mvm_build_unified_scan_probe(mvm, vif, ies, cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &hcmd);
+       if (!ret) {
+               IWL_DEBUG_SCAN(mvm,
+                              "Sched scan request was sent successfully\n");
+       } else {
+               /*
+                * If the scan failed, it usually means that the FW was unable
+                * to allocate the time events. Warn on it, but maybe we
+                * should try to send the command again with different params.
+                */
+               IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               ret = -EIO;
+       }
+       return ret;
+}
+
+
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+{
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
+               return iwl_mvm_scan_offload_stop(mvm, true);
+       return iwl_mvm_cancel_regular_scan(mvm);
+}
index 1fb01ea2e7047201324faefdc1f181408a77d070..81281396484724a9fc6cab21bb59b91b12ad52c1 100644 (file)
@@ -1448,3 +1448,77 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
 
        return 0;
 }
+
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+                                  struct iwl_mvm_sta *mvmsta, bool disable)
+{
+       struct iwl_mvm_add_sta_cmd cmd = {
+               .add_modify = STA_MODE_MODIFY,
+               .sta_id = mvmsta->sta_id,
+               .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+               .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+               .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+       };
+       int ret;
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX))
+               return;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       if (ret)
+               IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+                                     struct ieee80211_sta *sta,
+                                     bool disable)
+{
+       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+       spin_lock_bh(&mvm_sta->lock);
+
+       if (mvm_sta->disable_tx == disable) {
+               spin_unlock_bh(&mvm_sta->lock);
+               return;
+       }
+
+       mvm_sta->disable_tx = disable;
+
+       /*
+        * Tell mac80211 to start/stop queueing tx for this station,
+        * but don't stop queueing if there are still pending frames
+        * for this station.
+        */
+       if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
+               ieee80211_sta_block_awake(mvm->hw, sta, disable);
+
+       iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+       spin_unlock_bh(&mvm_sta->lock);
+}
+
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+                                      struct iwl_mvm_vif *mvmvif,
+                                      bool disable)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvm_sta;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /* Block/unblock all the stations of the given mvmvif */
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta))
+                       continue;
+
+               mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+               if (mvm_sta->mac_id_n_color !=
+                   FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
+                       continue;
+
+               iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+       }
+}
index d98e8a2142b8c6b1e3b9e0568956e0bcaa7a357c..3b1c8bd6cb54356c41c102402657cf765f617a50 100644 (file)
@@ -73,6 +73,7 @@
 #include "rs.h"
 
 struct iwl_mvm;
+struct iwl_mvm_vif;
 
 /**
  * DOC: station table - introduction
@@ -295,6 +296,7 @@ static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
  * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
  * @tx_protection: reference counter for controlling the Tx protection.
  * @tt_tx_protection: is thermal throttling enable Tx protection?
+ * @disable_tx: is tx to this STA disabled?
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -317,6 +319,8 @@ struct iwl_mvm_sta {
        /* Temporary, until the new TLC will control the Tx protection */
        s8 tx_protection;
        bool tt_tx_protection;
+
+       bool disable_tx;
 };
 
 static inline struct iwl_mvm_sta *
@@ -404,5 +408,13 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       bool agg);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain);
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+                                  struct iwl_mvm_sta *mvmsta, bool disable);
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+                                     struct ieee80211_sta *sta,
+                                     bool disable);
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+                                      struct iwl_mvm_vif *mvmvif,
+                                      bool disable);
 
 #endif /* __sta_h__ */
index 80100f6cc12a85a79fc71bd83cfe2f6962f8f1c0..ae52613b97f2da706a72bae3d04ce9015e8495c8 100644 (file)
@@ -138,6 +138,41 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
        schedule_work(&mvm->roc_done_wk);
 }
 
+static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
+{
+       struct ieee80211_vif *csa_vif;
+
+       rcu_read_lock();
+
+       csa_vif = rcu_dereference(mvm->csa_vif);
+       if (!csa_vif || !csa_vif->csa_active)
+               goto out_unlock;
+
+       IWL_DEBUG_TE(mvm, "CSA NOA started\n");
+
+       /*
+        * CSA NoA is started but we still have beacons to
+        * transmit on the current channel.
+        * So we just do nothing here and the switch
+        * will be performed on the last TBTT.
+        */
+       if (!ieee80211_csa_is_complete(csa_vif)) {
+               IWL_WARN(mvm, "CSA NOA started too early\n");
+               goto out_unlock;
+       }
+
+       ieee80211_csa_finish(csa_vif);
+
+       rcu_read_unlock();
+
+       RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+       return;
+
+out_unlock:
+       rcu_read_unlock();
+}
+
 static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        const char *errmsg)
@@ -213,6 +248,14 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
                        iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
                        ieee80211_ready_on_channel(mvm->hw);
+               } else if (te_data->vif->type == NL80211_IFTYPE_AP) {
+                       if (le32_to_cpu(notif->status))
+                               iwl_mvm_csa_noa_start(mvm);
+                       else
+                               IWL_DEBUG_TE(mvm, "CSA NOA failed to start\n");
+
+                       /* we don't need it anymore */
+                       iwl_mvm_te_clear_data(mvm, te_data);
                }
        } else {
                IWL_WARN(mvm, "Got TE with unknown action\n");
@@ -538,3 +581,33 @@ void iwl_mvm_stop_p2p_roc(struct iwl_mvm *mvm)
 
        iwl_mvm_roc_finished(mvm);
 }
+
+int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             u32 duration, u32 apply_time)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+       struct iwl_time_event_cmd time_cmd = {};
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (te_data->running) {
+               IWL_DEBUG_TE(mvm, "CS NOA is already scheduled\n");
+               return -EBUSY;
+       }
+
+       time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+       time_cmd.id_and_color =
+               cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+       time_cmd.id = cpu_to_le32(TE_P2P_GO_CSA_NOA);
+       time_cmd.apply_time = cpu_to_le32(apply_time);
+       time_cmd.max_frags = TE_V2_FRAG_NONE;
+       time_cmd.duration = cpu_to_le32(duration);
+       time_cmd.repeat = 1;
+       time_cmd.interval = cpu_to_le32(1);
+       time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+                                     TE_V2_ABSENCE);
+
+       return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
index 4a61c8c02372824cd08b3901f2e26c2a0353f35d..2f48a90d4ad3e61b010609e2c1d13357c92b3427 100644 (file)
@@ -214,4 +214,33 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 
 void iwl_mvm_roc_done_wk(struct work_struct *wk);
 
+/**
+ * iwl_mvm_schedule_csa_noa - request NoA for channel switch
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the channel switch is issued
+ * @duration: the duration of the NoA in TU.
+ * @apply_time: NoA start time in GP2.
+ *
+ * This function is used to schedule NoA time event and is used to perform
+ * the channel switch flow.
+ */
+int iwl_mvm_schedule_csa_noa(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            u32 duration, u32 apply_time);
+
+/**
+ * iwl_mvm_te_scheduled - check if the fw received the TE cmd
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function returns true iff this TE is added to the fw.
+ */
+static inline bool
+iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
+{
+       if (!te_data)
+               return false;
+
+       return !!te_data->uid;
+}
+
 #endif /* __time_event_h__ */
index 3846a6c41eb165ffbb8ede0ff102547f36911e65..e9ff38635c21914fa8d4174c0582df61cf1c4779 100644 (file)
@@ -131,7 +131,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
            !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 
-       tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = cpu_to_le32(tx_flags);
        /* Total # bytes to be transmitted */
        tx_cmd->len = cpu_to_le16((u16)skb->len);
@@ -205,7 +204,13 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
        mvm->mgmt_last_antenna_idx =
                iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
                                     mvm->mgmt_last_antenna_idx);
-       rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+       if (info->band == IEEE80211_BAND_2GHZ &&
+           !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+               rate_flags = BIT(ANT_A) << RATE_MCS_ANT_POS;
+       else
+               rate_flags =
+                       BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
 
        /* Set CCK flag as needed */
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
@@ -717,18 +722,26 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        /* We can't free more than one frame at once on a shared queue */
        WARN_ON(skb_freed > 1);
 
-       /* If we have still frames from this STA nothing to do here */
+       /* If we have still frames for this STA nothing to do here */
        if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
                goto out;
 
        if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+
                /*
-                * If there are no pending frames for this STA, notify
-                * mac80211 that this station can go to sleep in its
+                * If there are no pending frames for this STA and
+                * the tx to this station is not disabled, notify
+                * mac80211 that this station can now wake up in its
                 * STA table.
                 * If mvmsta is not NULL, sta is valid.
                 */
-               ieee80211_sta_block_awake(mvm->hw, sta, false);
+
+               spin_lock_bh(&mvmsta->lock);
+
+               if (!mvmsta->disable_tx)
+                       ieee80211_sta_block_awake(mvm->hw, sta, false);
+
+               spin_unlock_bh(&mvmsta->lock);
        }
 
        if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
index aa9fc77e8413b607861e370169e0b55ba4b697d1..ac249da8a22b0840ce8de74638ba1969a119c67c 100644 (file)
@@ -519,71 +519,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                iwl_mvm_dump_umac_error_log(mvm);
 }
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
-{
-       const struct fw_img *img;
-       u32 ofs, sram_len;
-       void *sram;
-
-       if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
-               return;
-
-       img = &mvm->fw->img[mvm->cur_ucode];
-       ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-       sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
-
-       sram = kzalloc(sram_len, GFP_ATOMIC);
-       if (!sram)
-               return;
-
-       iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
-       mvm->fw_error_sram = sram;
-       mvm->fw_error_sram_len = sram_len;
-}
-
-void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
-{
-       int i, reg_val;
-       unsigned long flags;
-
-       if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
-               return;
-
-       /* reading buffer size */
-       reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
-       mvm->fw_error_rxf_len =
-               (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
-
-       /* the register holds the value divided by 128 */
-       mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
-
-       if (!mvm->fw_error_rxf_len)
-               return;
-
-       mvm->fw_error_rxf =  kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
-       if (!mvm->fw_error_rxf) {
-               mvm->fw_error_rxf_len = 0;
-               return;
-       }
-
-       if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
-               kfree(mvm->fw_error_rxf);
-               mvm->fw_error_rxf = NULL;
-               mvm->fw_error_rxf_len = 0;
-               return;
-       }
-
-       for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
-               iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
-                                    i * sizeof(u32));
-               mvm->fw_error_rxf[i] =
-                       iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
-       }
-       iwl_trans_release_nic_access(mvm->trans, &flags);
-}
-#endif
-
 /**
  * iwl_mvm_send_lq_cmd() - Send link quality command
  * @init: This command is sent as part of station initialization right
index 6c22b23a2845723c33df6757a1ced2c545747e94..78f72c34438aaabd7be9f94386a4e093689a2906 100644 (file)
@@ -260,6 +260,9 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @wd_timeout: queue watchdog timeout (jiffies)
  * @reg_lock: protect hw register access
  * @cmd_in_flight: true when we have a host command in flight
+ * @fw_mon_phys: physical address of the buffer for the firmware monitor
+ * @fw_mon_page: points to the first page of the buffer for the firmware monitor
+ * @fw_mon_size: size of the buffer for the firmware monitor
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
@@ -312,6 +315,10 @@ struct iwl_trans_pcie {
        /*protect hw register */
        spinlock_t reg_lock;
        bool cmd_in_flight;
+
+       dma_addr_t fw_mon_phys;
+       struct page *fw_mon_page;
+       u32 fw_mon_size;
 };
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
index 788085bc65d78e3382c7fa76ca74de30abd4cd84..5b5b0d8c6f6051f7dd0311cf1437090d36a1d2f2 100644 (file)
 #include "iwl-fw-error-dump.h"
 #include "internal.h"
 
+static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (!trans_pcie->fw_mon_page)
+               return;
+
+       dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
+                      trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
+       __free_pages(trans_pcie->fw_mon_page,
+                    get_order(trans_pcie->fw_mon_size));
+       trans_pcie->fw_mon_page = NULL;
+       trans_pcie->fw_mon_phys = 0;
+       trans_pcie->fw_mon_size = 0;
+}
+
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct page *page;
+       dma_addr_t phys;
+       u32 size;
+       u8 power;
+
+       if (trans_pcie->fw_mon_page) {
+               dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
+                                          trans_pcie->fw_mon_size,
+                                          DMA_FROM_DEVICE);
+               return;
+       }
+
+       phys = 0;
+       for (power = 26; power >= 11; power--) {
+               int order;
+
+               size = BIT(power);
+               order = get_order(size);
+               page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
+                                  order);
+               if (!page)
+                       continue;
+
+               phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
+                                   DMA_FROM_DEVICE);
+               if (dma_mapping_error(trans->dev, phys)) {
+                       __free_pages(page, order);
+                       continue;
+               }
+               IWL_INFO(trans,
+                        "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
+                        size, order);
+               break;
+       }
+
+       if (!page)
+               return;
+
+       trans_pcie->fw_mon_page = page;
+       trans_pcie->fw_mon_phys = phys;
+       trans_pcie->fw_mon_size = size;
+}
+
 static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
 {
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
@@ -675,6 +737,7 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret = 0;
        int first_ucode_section;
 
@@ -733,6 +796,20 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                        return ret;
        }
 
+       /* supported for 7000 only for the moment */
+       if (iwlwifi_mod_params.fw_monitor &&
+           trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_pcie_alloc_fw_monitor(trans);
+
+               if (trans_pcie->fw_mon_size) {
+                       iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
+                                      trans_pcie->fw_mon_phys >> 4);
+                       iwl_write_prph(trans, MON_BUFF_END_ADDR,
+                                      (trans_pcie->fw_mon_phys +
+                                       trans_pcie->fw_mon_size) >> 4);
+               }
+       }
+
        /* release CPU reset */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
@@ -1126,6 +1203,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        if (trans_pcie->napi.poll)
                netif_napi_del(&trans_pcie->napi);
 
+       iwl_pcie_free_fw_monitor(trans);
+
        kfree(trans);
 }
 
@@ -1494,10 +1573,12 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                txq = &trans_pcie->txq[cnt];
                q = &txq->q;
                pos += scnprintf(buf + pos, bufsz - pos,
-                               "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
+                               "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n",
                                cnt, q->read_ptr, q->write_ptr,
                                !!test_bit(cnt, trans_pcie->queue_used),
-                               !!test_bit(cnt, trans_pcie->queue_stopped));
+                                !!test_bit(cnt, trans_pcie->queue_stopped),
+                                txq->need_update,
+                                (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
        kfree(buf);
@@ -1519,6 +1600,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                                rxq->read);
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
                                                rxq->write);
+       pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
+                                               rxq->write_actual);
+       pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
+                                               rxq->need_update);
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
                                                rxq->free_count);
        if (rxq->rb_stts) {
@@ -1698,10 +1783,15 @@ static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
        u32 len;
        int i, ptr;
 
+       len = sizeof(*data) +
+               cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+       if (trans_pcie->fw_mon_page)
+               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+                       trans_pcie->fw_mon_size;
+
        if (!buf)
-               return sizeof(*data) +
-                      cmdq->q.n_window * (sizeof(*txcmd) +
-                                          TFD_MAX_PAYLOAD_SIZE);
+               return len;
 
        len = 0;
        data = buf;
@@ -1729,7 +1819,40 @@ static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
        spin_unlock_bh(&cmdq->lock);
 
        data->len = cpu_to_le32(len);
-       return sizeof(*data) + len;
+       len += sizeof(*data);
+
+       if (trans_pcie->fw_mon_page) {
+               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+
+               data = iwl_fw_error_next_data(data);
+               data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+               data->len = cpu_to_le32(trans_pcie->fw_mon_size +
+                                       sizeof(*fw_mon_data));
+               fw_mon_data = (void *)data->data;
+               fw_mon_data->fw_mon_wr_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, MON_BUFF_WRPTR));
+               fw_mon_data->fw_mon_cycle_cnt =
+                       cpu_to_le32(iwl_read_prph(trans, MON_BUFF_CYCLE_CNT));
+               fw_mon_data->fw_mon_base_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, MON_BUFF_BASE_ADDR));
+
+               /*
+                * The firmware is now asserted, it won't write anything to
+                * the buffer. CPU can take ownership to fetch the data.
+                * The buffer will be handed back to the device before the
+                * firmware will be restarted.
+                */
+               dma_sync_single_for_cpu(trans->dev, trans_pcie->fw_mon_phys,
+                                       trans_pcie->fw_mon_size,
+                                       DMA_FROM_DEVICE);
+               memcpy(fw_mon_data->data, page_address(trans_pcie->fw_mon_page),
+                      trans_pcie->fw_mon_size);
+
+               len += sizeof(*data) + sizeof(*fw_mon_data) +
+                       trans_pcie->fw_mon_size;
+       }
+
+       return len;
 }
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
@@ -1870,6 +1993,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        }
 
        trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+       /*
+        * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
+        * changed, and now the revision step also includes bit 0-1 (no more
+        * "dash" value). To keep hw_rev backwards compatible - we'll store it
+        * in the old format.
+        */
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               trans->hw_rev = (trans->hw_rev & 0xfff0) |
+                               ((trans->hw_rev << 2) & 0xc);
+
        trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
index 038940afbdc57d8d176908bb4869fc8791f250eb..6acccb19c4f3030956e0d701a037c1227d7121b1 100644 (file)
@@ -1438,6 +1438,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
                        trans_pcie->cmd_in_flight = false;
+                       IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
                        idx = -EIO;
                        goto out;
                }
index 0485c99575757841edb0f929205d4963c6d082a2..e6268ceacbf1e5ddeb8e0d6c2ab321523e289801 100644 (file)
@@ -16,7 +16,7 @@ config LIBERTAS_USB
 
 config LIBERTAS_CS
        tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
-       depends on LIBERTAS && PCMCIA
+       depends on LIBERTAS && PCMCIA && HAS_IOPORT_MAP
        ---help---
          A driver for Marvell Libertas 8385 CompactFlash devices.
 
index aaa297315c47102df73b6fd3948e4b8fd4cbba25..0387a5b380c80f5eeafd05c53c3ec12fc5b892f5 100644 (file)
@@ -1111,6 +1111,7 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
 
        cmd.hdr.size = cpu_to_le16(sizeof(cmd));
        cmd.action = cpu_to_le16(CMD_ACT_SET);
+       cmd.control = 0;
 
        /* Only v8 and below support setting the preamble */
        if (priv->fwrelease < 0x09000000) {
index 0c02f0483d1fd65e8b2a61b55ab57cbcc95ad5f2..569b64ecc6075f1fa028091b7b319c20fbda01c9 100644 (file)
@@ -981,7 +981,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
                goto err_wdev;
        }
 
-       dev = alloc_netdev(0, "wlan%d", ether_setup);
+       dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
        if (!dev) {
                dev_err(dmdev, "no memory for network device instance\n");
                goto err_adapter;
index 6fef746345bc5ddd16957ad0d69a42a1f4636903..01a67f62696f74788e6e4b01eb717f1f6d7fb75c 100644 (file)
@@ -1000,7 +1000,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
                goto done;
        }
 
-       mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
+       mesh_dev = alloc_netdev(0, "msh%d", NET_NAME_UNKNOWN, ether_setup);
        if (!mesh_dev) {
                lbs_deb_mesh("init mshX device failed\n");
                ret = -ENOMEM;
index a312c653d1163fcc5c4ff394a54b0c7a96370d8f..5ea65fce0b83224bc3628138da76a8ff518d0d9f 100644 (file)
@@ -781,6 +781,36 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
        netif_rx(skb);
 }
 
+struct mac80211_hwsim_addr_match_data {
+       u8 addr[ETH_ALEN];
+       bool ret;
+};
+
+static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
+                                    struct ieee80211_vif *vif)
+{
+       struct mac80211_hwsim_addr_match_data *md = data;
+
+       if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+               md->ret = true;
+}
+
+static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
+                                     const u8 *addr)
+{
+       struct mac80211_hwsim_addr_match_data md = {
+               .ret = false,
+       };
+
+       memcpy(md.addr, addr, ETH_ALEN);
+
+       ieee80211_iterate_active_interfaces_atomic(data->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  mac80211_hwsim_addr_iter,
+                                                  &md);
+
+       return md.ret;
+}
 
 static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
                           struct sk_buff *skb)
@@ -798,8 +828,7 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
                /* Allow unicast frames to own address if there is a pending
                 * PS-Poll */
                if (data->ps_poll_pending &&
-                   memcmp(data->hw->wiphy->perm_addr, skb->data + 4,
-                          ETH_ALEN) == 0) {
+                   mac80211_hwsim_addr_match(data, skb->data + 4)) {
                        data->ps_poll_pending = false;
                        return true;
                }
@@ -809,39 +838,6 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
        return true;
 }
 
-
-struct mac80211_hwsim_addr_match_data {
-       bool ret;
-       const u8 *addr;
-};
-
-static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
-                                    struct ieee80211_vif *vif)
-{
-       struct mac80211_hwsim_addr_match_data *md = data;
-       if (memcmp(mac, md->addr, ETH_ALEN) == 0)
-               md->ret = true;
-}
-
-
-static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
-                                     const u8 *addr)
-{
-       struct mac80211_hwsim_addr_match_data md;
-
-       if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
-               return true;
-
-       md.ret = false;
-       md.addr = addr;
-       ieee80211_iterate_active_interfaces_atomic(data->hw,
-                                                  IEEE80211_IFACE_ITER_NORMAL,
-                                                  mac80211_hwsim_addr_iter,
-                                                  &md);
-
-       return md.ret;
-}
-
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                                       struct sk_buff *my_skb,
                                       int dst_portid)
@@ -1740,9 +1736,10 @@ static void hw_scan_work(struct work_struct *work)
 
 static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 struct cfg80211_scan_request *req)
+                                 struct ieee80211_scan_request *hw_req)
 {
        struct mac80211_hwsim_data *hwsim = hw->priv;
+       struct cfg80211_scan_request *req = &hw_req->req;
 
        mutex_lock(&hwsim->mutex);
        if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
@@ -2679,7 +2676,8 @@ static int __init init_mac80211_hwsim(void)
                        goto out_free_radios;
        }
 
-       hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
+       hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN,
+                                hwsim_mon_setup);
        if (hwsim_mon == NULL) {
                err = -ENOMEM;
                goto out_free_radios;
index 706831df1fa2a4183cb3c5ad849f1aa8df8dbb14..59d23fb2365f202112ac9e2a1a322885f0d791b6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11ac
  *
- * Copyright (C) 2013, Marvell International Ltd.
+ * Copyright (C) 2013-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0b02cb6cfcb4d25ea496b21c2f737d94a0017e0c..1ca92c7a8a4a864ec3a2d370d1eda7bed7f163f3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11ac
  *
- * Copyright (C) 2013, Marvell International Ltd.
+ * Copyright (C) 2013-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index e76b0db4e3e6392236489affbfcba48b98468348..2668e83afbb65c52c58dddd2cea1a8261390d0a7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11h
  *
- * Copyright (C) 2013, Marvell International Ltd.
+ * Copyright (C) 2013-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index e1c2f67ae85e694d52b1f9e4ad69f2d50ab6ba91..9d6d8d9f01e39bfe2716e28fed9b4d7b7e61b140 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0b73fa08f5d466b98d5292d0a5b16e1011c30be3..2ee268b632be56ca1aa994ed9b1de7baece31d3d 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index fe0f66f735076d68aa7cef6e19ab791d34411b4a..8720a3d3c755c6065dd9db413ee708680647b531 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n Aggregation
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 892098d6a69687dd2d8c1fc61612a6fb9999d754..0cd2a3eb6c178ad3314415658df9f36f1a6866f7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n Aggregation
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0c3571f830b0d70cc609e64d9b0b6fde3cf17209..b22bae3d1205bd0c22000fc9e66a50795e1f3eb9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0fc76e4a60f886c32d3e46cf885cbcc809ee9893..3a87bb0e3a62adb477784a94feba13cefd32626b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 2aa208ffbe233eefc06f9cdb24c64db9c56f3188..9487d728ac20acb46521089111ebd46ff5ab8a83 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2011, Marvell International Ltd.
+# Copyright (C) 2011-2014, Marvell International Ltd.
 #
 # This software file (the "File") is distributed by Marvell International
 # Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 3b55ce5690a54e226c5482f523a3c80d1e95d7bf..31928caeeed225edbae57f38bd7488edee7b726d 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, Marvell International Ltd.
+# Copyright (C) 2011-2014, Marvell International Ltd.
 #
 # This software file (the "File") is distributed by Marvell International
 # Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -194,6 +194,36 @@ rdeeprom
        Example:
                echo "0 20" > rdeeprom      : Read 20 bytes of EEPROM data from offset 0
 
+hscfg
+       This command is used to debug/simulate host sleep feature using
+       different configuration parameters.
+
+       Usage:
+               echo "<condition> [GPIO# [gap]]]" > hscfg
+               cat hscfg
+
+       where the parameters are,
+               <condition>: bit 0 = 1   -- broadcast data
+                            bit 1 = 1   -- unicast data
+                            bit 2 = 1   -- mac event
+                            bit 3 = 1   -- multicast data
+               [GPIO#]: pin number of GPIO used to wakeup the host.
+                        GPIO pin# (e.g. 0-7) or 0xff (interface, e.g. SDIO
+                        will be used instead).
+               [gap]:   the gap in milliseconds between wakeup signal and
+                        wakeup event or 0xff for special setting (host
+                        acknowledge required) when GPIO is used to wakeup host.
+
+       Examples:
+               echo "-1" > hscfg        : Cancel host sleep mode
+               echo "3" > hscfg         : Broadcast and unicast data;
+                                          Use GPIO and gap set previously
+               echo "2 3" > hscfg       : Unicast data and GPIO 3;
+                                          Use gap set previously
+               echo "2 1 160" > hscfg   : Unicast data, GPIO 1 and gap 160 ms
+               echo "2 1 0xff" > hscfg  : Unicast data, GPIO 1; Wait for host
+                                          to ack before sending wakeup event
+
 getlog
         This command is used to get the statistics available in the station.
        Usage:
index b511613bba2d8608f057fd15223c2af33c3be962..ca87f923c61eaa251908add3f2bc81c95cdb9132 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: CFG80211
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -42,36 +42,6 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
        .beacon_int_infra_match = true,
 };
 
-static const struct ieee80211_regdomain mwifiex_world_regdom_custom = {
-       .n_reg_rules = 7,
-       .alpha2 =  "99",
-       .reg_rules = {
-               /* Channel 1 - 11 */
-               REG_RULE(2412-10, 2462+10, 40, 3, 20, 0),
-               /* Channel 12 - 13 */
-               REG_RULE(2467-10, 2472+10, 20, 3, 20,
-                        NL80211_RRF_NO_IR),
-               /* Channel 14 */
-               REG_RULE(2484-10, 2484+10, 20, 3, 20,
-                        NL80211_RRF_NO_IR |
-                        NL80211_RRF_NO_OFDM),
-               /* Channel 36 - 48 */
-               REG_RULE(5180-10, 5240+10, 40, 3, 20,
-                        NL80211_RRF_NO_IR),
-               /* Channel 149 - 165 */
-               REG_RULE(5745-10, 5825+10, 40, 3, 20,
-                        NL80211_RRF_NO_IR),
-               /* Channel 52 - 64 */
-               REG_RULE(5260-10, 5320+10, 40, 3, 30,
-                        NL80211_RRF_NO_IR |
-                        NL80211_RRF_DFS),
-               /* Channel 100 - 140 */
-               REG_RULE(5500-10, 5700+10, 40, 3, 30,
-                        NL80211_RRF_NO_IR |
-                        NL80211_RRF_DFS),
-       }
-};
-
 /*
  * This function maps the nl802.11 channel type into driver channel type.
  *
@@ -151,7 +121,6 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
        u8 addr[ETH_ALEN] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
        u16 pkt_len;
        u32 tx_control = 0, pkt_type = PKT_TYPE_MGMT;
-       struct timeval tv;
 
        pkt_len = len + ETH_ALEN;
 
@@ -173,8 +142,7 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
               len - sizeof(struct ieee80211_hdr_3addr));
 
        skb->priority = LOW_PRIO_TID;
-       do_gettimeofday(&tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       __net_timestamp(skb);
 
        return 0;
 }
@@ -2264,7 +2232,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        }
 
        dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
-                              ether_setup, IEEE80211_NUM_ACS, 1);
+                              NET_NAME_UNKNOWN, ether_setup,
+                              IEEE80211_NUM_ACS, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2484,6 +2453,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                if (filt_num)
                        mef_entry->filter[filt_num].filt_action = TYPE_OR;
+
+               filt_num++;
+               mef_entry->filter[filt_num].repeat = 16;
+               memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
+                      ETH_ALEN);
+               mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+                                                               ETH_ALEN;
+               mef_entry->filter[filt_num].offset = 56;
+               mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+               mef_entry->filter[filt_num].filt_action = TYPE_OR;
        }
 
        if (!mef_cfg.criteria)
@@ -2632,7 +2611,8 @@ static int
 mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                           const u8 *peer, u8 action_code, u8 dialog_token,
                           u16 status_code, u32 peer_capability,
-                          const u8 *extra_ies, size_t extra_ies_len)
+                          bool initiator, const u8 *extra_ies,
+                          size_t extra_ies_len)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        int ret;
@@ -2917,12 +2897,6 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
                                WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
 
-       wiphy->regulatory_flags |=
-                       REGULATORY_CUSTOM_REG |
-                       REGULATORY_STRICT_REG;
-
-       wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
-
 #ifdef CONFIG_PM
        wiphy->wowlan = &mwifiex_wowlan_support;
 #endif
index c5848934f1117d15e343c9e925f407a7d03a34de..908367857d58918f6e00f78d1b2b996acda06dc4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: CFG80211
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0ddec3d4b059cbd7d03221578035d5b1721db9b2..b8242eb2be6fd272906c605532f47b986ae61c08 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: Channel, Frequence and Power
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index c161141f6c39ec8c2bcf5d8e9a2a2951c9f94a71..5899eee87fb1a082a593ac833ccd58236006584f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: commands and events
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -273,6 +273,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
                                (struct mwifiex_opt_sleep_confirm *)
                                                adapter->sleep_cfm->data;
        struct sk_buff *sleep_cfm_tmp;
+       struct timeval ts;
        __le32 tmp;
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -283,6 +284,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
                                        (adapter->seq_num, priv->bss_num,
                                         priv->bss_type)));
 
+       do_gettimeofday(&ts);
+       dev_dbg(adapter->dev,
+               "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d, seqno %#x\n",
+               ts.tv_sec, ts.tv_usec, le16_to_cpu(sleep_cfm_buf->command),
+               le16_to_cpu(sleep_cfm_buf->action),
+               le16_to_cpu(sleep_cfm_buf->size),
+               le16_to_cpu(sleep_cfm_buf->seq_num));
+
        if (adapter->iface_type == MWIFIEX_USB) {
                sleep_cfm_tmp =
                        dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
@@ -458,11 +467,10 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
                rx_info->bss_type = priv->bss_type;
        }
 
-       if (eventcause != EVENT_PS_SLEEP && eventcause != EVENT_PS_AWAKE) {
-               do_gettimeofday(&tstamp);
-               dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
-                       tstamp.tv_sec, tstamp.tv_usec, eventcause);
-       } else {
+       do_gettimeofday(&tstamp);
+       dev_dbg(adapter->dev, "EVENT: %lu.%lu: cause: %#x\n",
+               tstamp.tv_sec, tstamp.tv_usec, eventcause);
+       if (eventcause == EVENT_PS_SLEEP || eventcause == EVENT_PS_AWAKE) {
                /* Handle PS_SLEEP/AWAKE events on STA */
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
                if (!priv)
@@ -961,6 +969,9 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
                mwifiex_init_fw_complete(adapter);
 
+       if (adapter->if_ops.fw_dump)
+               adapter->if_ops.fw_dump(adapter);
+
        if (adapter->if_ops.card_reset)
                adapter->if_ops.card_reset(adapter);
 }
@@ -1226,12 +1237,19 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
        uint16_t result = le16_to_cpu(cmd->result);
        uint16_t command = le16_to_cpu(cmd->command);
        uint16_t seq_num = le16_to_cpu(cmd->seq_num);
+       struct timeval ts;
 
        if (!upld_len) {
                dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
                return;
        }
 
+       do_gettimeofday(&ts);
+       dev_dbg(adapter->dev,
+               "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d, len %d, seqno 0x%x\n",
+               ts.tv_sec, ts.tv_usec, command, result, le16_to_cpu(cmd->size),
+               seq_num);
+
        /* Get BSS number and corresponding priv */
        priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
                                      HostCmd_GET_BSS_TYPE(seq_num));
index 7b419bbcd5444f5c5abdf40ffb2368087b77e89a..2713f7acd35e6d45081482c36b983a8a92af89a3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: debugfs
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -692,6 +692,97 @@ done:
        return ret;
 }
 
+/* Proc hscfg file write handler
+ * This function can be used to configure the host sleep parameters.
+ */
+static ssize_t
+mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
+                   size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = (void *)file->private_data;
+       unsigned long addr = get_zeroed_page(GFP_KERNEL);
+       char *buf = (char *)addr;
+       size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
+       int ret, arg_num;
+       struct mwifiex_ds_hs_cfg hscfg;
+       int conditions = HS_CFG_COND_DEF;
+       u32 gpio = HS_CFG_GPIO_DEF, gap = HS_CFG_GAP_DEF;
+
+       if (!buf)
+               return -ENOMEM;
+
+       if (copy_from_user(buf, ubuf, buf_size)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       arg_num = sscanf(buf, "%d %x %x", &conditions, &gpio, &gap);
+
+       memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
+
+       if (arg_num > 3) {
+               dev_err(priv->adapter->dev, "Too many arguments\n");
+               ret = -EINVAL;
+               goto done;
+       }
+
+       if (arg_num >= 1 && arg_num < 3)
+               mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_GET,
+                                     MWIFIEX_SYNC_CMD, &hscfg);
+
+       if (arg_num) {
+               if (conditions == HS_CFG_CANCEL) {
+                       mwifiex_cancel_hs(priv, MWIFIEX_ASYNC_CMD);
+                       ret = count;
+                       goto done;
+               }
+               hscfg.conditions = conditions;
+       }
+       if (arg_num >= 2)
+               hscfg.gpio = gpio;
+       if (arg_num == 3)
+               hscfg.gap = gap;
+
+       hscfg.is_invoke_hostcmd = false;
+       mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+                             MWIFIEX_SYNC_CMD, &hscfg);
+
+       mwifiex_enable_hs(priv->adapter);
+       priv->adapter->hs_enabling = false;
+       ret = count;
+done:
+       free_page(addr);
+       return ret;
+}
+
+/* Proc hscfg file read handler
+ * This function can be used to read host sleep configuration
+ * parameters from driver.
+ */
+static ssize_t
+mwifiex_hscfg_read(struct file *file, char __user *ubuf,
+                  size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = (void *)file->private_data;
+       unsigned long addr = get_zeroed_page(GFP_KERNEL);
+       char *buf = (char *)addr;
+       int pos, ret;
+       struct mwifiex_ds_hs_cfg hscfg;
+
+       if (!buf)
+               return -ENOMEM;
+
+       mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_GET,
+                             MWIFIEX_SYNC_CMD, &hscfg);
+
+       pos = snprintf(buf, PAGE_SIZE, "%u 0x%x 0x%x\n", hscfg.conditions,
+                      hscfg.gpio, hscfg.gap);
+
+       ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+       free_page(addr);
+       return ret;
+}
 
 #define MWIFIEX_DFS_ADD_FILE(name) do {                                 \
        if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir,        \
@@ -725,6 +816,7 @@ MWIFIEX_DFS_FILE_READ_OPS(getlog);
 MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
 MWIFIEX_DFS_FILE_OPS(regrdwr);
 MWIFIEX_DFS_FILE_OPS(rdeeprom);
+MWIFIEX_DFS_FILE_OPS(hscfg);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -747,6 +839,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(regrdwr);
        MWIFIEX_DFS_ADD_FILE(rdeeprom);
        MWIFIEX_DFS_ADD_FILE(fw_dump);
+       MWIFIEX_DFS_ADD_FILE(hscfg);
 }
 
 /*
index 38da6ff6f41623618efa22add335ffef1fa46828..0e03fe39fc35ea64fca0933a20b46966686a5208 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: generic data structures and APIs
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index bfb39908b2c694b0c58b2ee2695b22032bb746ad..04e56b5fc5354eea46d6cf44b88707bed3f19367 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: ethtool
  *
- * Copyright (C) 2013, Marvell International Ltd.
+ * Copyright (C) 2013-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -64,7 +64,90 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev,
        return 0;
 }
 
+static int
+mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct memory_type_mapping *entry;
+
+       if (!adapter->if_ops.fw_dump)
+               return -ENOTSUPP;
+
+       dump->flag = adapter->curr_mem_idx;
+       dump->version = 1;
+       if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
+               entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
+               dump->len = entry->mem_size;
+       } else {
+               dump->len = 0;
+       }
+
+       return 0;
+}
+
+static int
+mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
+                     void *buffer)
+{
+       u8 *p = buffer;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct memory_type_mapping *entry;
+
+       if (!adapter->if_ops.fw_dump)
+               return -ENOTSUPP;
+
+       if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
+               dev_err(adapter->dev, "firmware dump in progress!!\n");
+               return -EBUSY;
+       }
+
+       entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
+
+       if (!entry->mem_ptr)
+               return -EFAULT;
+
+       memcpy(p, entry->mem_ptr, entry->mem_size);
+
+       entry->mem_size = 0;
+       vfree(entry->mem_ptr);
+       entry->mem_ptr = NULL;
+
+       return 0;
+}
+
+static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       struct mwifiex_adapter *adapter = priv->adapter;
+
+       if (!adapter->if_ops.fw_dump)
+               return -ENOTSUPP;
+
+       if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
+               dev_err(adapter->dev, "firmware dump in progress!!\n");
+               return -EBUSY;
+       }
+
+       if (val->flag == MWIFIEX_FW_DUMP_IDX) {
+               adapter->curr_mem_idx = val->flag;
+               adapter->if_ops.fw_dump(adapter);
+               return 0;
+       }
+
+       if (val->flag < 0 || val->flag >= adapter->num_mem_types)
+               return -EINVAL;
+
+       adapter->curr_mem_idx = val->flag;
+
+       return 0;
+}
+
 const struct ethtool_ops mwifiex_ethtool_ops = {
        .get_wol = mwifiex_ethtool_get_wol,
        .set_wol = mwifiex_ethtool_set_wol,
+       .get_dump_flag = mwifiex_get_dump_flag,
+       .get_dump_data = mwifiex_get_dump_data,
+       .set_dump = mwifiex_set_dump,
 };
index 3175dd04834b9960698c67e750fc3d8928075088..5561573452bb4a27cc92a6ae490303448ea0e103 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: Firmware specific macros & structures
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 3bf3d58bbc029b0a48a937a0c65d15a83552b295..b933794758b7171e01f8104d4b0f6b63320103be 100644 (file)
@@ -2,7 +2,7 @@
  * Marvell Wireless LAN device driver: management IE handling- setting and
  * deleting IE.
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 4ecd0b208ac64f5be6ffa75b9f15a1be965250a2..269a277d0a2e6072c092f066c9b23943d8075419 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: HW/FW Initialization
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -382,6 +382,8 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
 static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
+       int idx;
+
        if (!adapter) {
                pr_err("%s: adapter is NULL\n", __func__);
                return;
@@ -396,7 +398,16 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
        dev_dbg(adapter->dev, "info: free cmd buffer\n");
        mwifiex_free_cmd_buffer(adapter);
 
-       dev_dbg(adapter->dev, "info: free scan table\n");
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
 
        if (adapter->sleep_cfm)
                dev_kfree_skb_any(adapter->sleep_cfm);
index 1b576722671d5e6f228363c36c94c4ef47867980..0847f3e07ab7888e2f79eada0e07fd263620edcb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: ioctl data structures & APIs
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 89dc62a467f4d2ba8b7cc2fcf6b7d1d63b058f68..fc135649b85f4ebc63edfb0b2b2f7c4d9871ef98 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: association and ad-hoc start/join
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index e91cd0fa5ca81e3585e8173a0fb6a1789cfdaca7..3e5194fb0b0fd44c5314e8bbe5fb950325bb759c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: major functions
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -609,7 +609,6 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        struct sk_buff *new_skb;
        struct mwifiex_txinfo *tx_info;
-       struct timeval tv;
 
        dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
                jiffies, priv->bss_type, priv->bss_num);
@@ -657,8 +656,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * firmware for aggregate delay calculation for stats and
         * MSDU lifetime expiry.
         */
-       do_gettimeofday(&tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       __net_timestamp(skb);
 
        mwifiex_queue_tx_pkt(priv, skb);
 
@@ -882,6 +880,8 @@ mwifiex_add_card(void *card, struct semaphore *sem,
                goto err_kmalloc;
 
        INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
+       if (adapter->if_ops.iface_work)
+               INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work);
 
        /* Register the device. Fill up the private data structure with relevant
           information from the card. */
index 1398afa8406401c9fd3898716b0849a22946cd42..a2733b1e63f9ec0374f38ba5908b18e52345dfd6 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: major data structures and prototypes
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -30,6 +30,7 @@
 #include <linux/etherdevice.h>
 #include <net/sock.h>
 #include <net/lib80211.h>
+#include <linux/vmalloc.h>
 #include <linux/firmware.h>
 #include <linux/ctype.h>
 #include <linux/of.h>
@@ -410,6 +411,29 @@ struct mwifiex_roc_cfg {
        struct ieee80211_channel chan;
 };
 
+#define MWIFIEX_FW_DUMP_IDX            0xff
+#define FW_DUMP_MAX_NAME_LEN           8
+#define FW_DUMP_HOST_READY             0xEE
+#define FW_DUMP_DONE                   0xFF
+
+struct memory_type_mapping {
+       u8 mem_name[FW_DUMP_MAX_NAME_LEN];
+       u8 *mem_ptr;
+       u32 mem_size;
+       u8 done_flag;
+};
+
+enum rdwr_status {
+       RDWR_STATUS_SUCCESS = 0,
+       RDWR_STATUS_FAILURE = 1,
+       RDWR_STATUS_DONE = 2
+};
+
+enum mwifiex_iface_work_flags {
+       MWIFIEX_IFACE_WORK_FW_DUMP,
+       MWIFIEX_IFACE_WORK_CARD_RESET,
+};
+
 struct mwifiex_adapter;
 struct mwifiex_private;
 
@@ -674,6 +698,7 @@ struct mwifiex_if_ops {
        void (*card_reset) (struct mwifiex_adapter *);
        void (*fw_dump)(struct mwifiex_adapter *);
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
+       void (*iface_work)(struct work_struct *work);
 };
 
 struct mwifiex_adapter {
@@ -809,6 +834,11 @@ struct mwifiex_adapter {
        bool ext_scan;
        u8 fw_api_ver;
        u8 fw_key_api_major_ver, fw_key_api_minor_ver;
+       struct work_struct iface_work;
+       unsigned long iface_work_flags;
+       struct memory_type_mapping *mem_type_mapping_tbl;
+       u8 num_mem_types;
+       u8 curr_mem_idx;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -890,6 +920,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
 void mwifiex_process_hs_config(struct mwifiex_adapter *adapter);
 void mwifiex_hs_activated_event(struct mwifiex_private *priv,
                                        u8 activated);
+int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
+                         int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg);
 int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
                              struct host_cmd_ds_command *resp);
 int mwifiex_process_rx_packet(struct mwifiex_private *priv,
index 2cc9b6fca490cd4b57854a8002e61f0064c28397..5f7afffdd34e73dcdc020555afecd1e84c7a08ed 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: PCIE specific handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -37,6 +37,13 @@ static struct mwifiex_if_ops pcie_ops;
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping mem_type_mapping_tbl[] = {
+       {"ITCM", NULL, 0, 0xF0},
+       {"DTCM", NULL, 0, 0xF1},
+       {"SQRAM", NULL, 0, 0xF2},
+       {"IRAM", NULL, 0, 0xF3},
+};
+
 static int
 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                       size_t size, int flags)
@@ -192,6 +199,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.reg = data->reg;
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
+               card->pcie.supports_fw_dump = data->supports_fw_dump;
        }
 
        if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -221,6 +229,8 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
+       cancel_work_sync(&adapter->iface_work);
+
        if (user_rmmod) {
 #ifdef CONFIG_PM_SLEEP
                if (adapter->is_suspended)
@@ -307,6 +317,17 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
        return 0;
 }
 
+/* This function reads u8 data from PCIE card register. */
+static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
+                                int reg, u8 *data)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       *data = ioread8(card->pci_mmap1 + reg);
+
+       return 0;
+}
+
 /*
  * This function adds delay loop to ensure FW is awake before proceeding.
  */
@@ -2173,6 +2194,174 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
        return 0;
 }
 
+/* This function read/write firmware */
+static enum rdwr_status
+mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
+{
+       int ret, tries;
+       u8 ctrl_data;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
+       if (ret) {
+               dev_err(adapter->dev, "PCIE write err\n");
+               return RDWR_STATUS_FAILURE;
+       }
+
+       for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+               mwifiex_read_reg_byte(adapter, reg->fw_dump_ctrl, &ctrl_data);
+               if (ctrl_data == FW_DUMP_DONE)
+                       return RDWR_STATUS_SUCCESS;
+               if (doneflag && ctrl_data == doneflag)
+                       return RDWR_STATUS_DONE;
+               if (ctrl_data != FW_DUMP_HOST_READY) {
+                       dev_info(adapter->dev,
+                                "The ctrl reg was changed, re-try again!\n");
+                       mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
+                                         FW_DUMP_HOST_READY);
+                       if (ret) {
+                               dev_err(adapter->dev, "PCIE write err\n");
+                               return RDWR_STATUS_FAILURE;
+                       }
+               }
+               usleep_range(100, 200);
+       }
+
+       dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+       return RDWR_STATUS_FAILURE;
+}
+
+/* This function dump firmware memory to file */
+static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
+       unsigned int reg, reg_start, reg_end;
+       struct timeval t;
+       u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
+       enum rdwr_status stat;
+       u32 memory_size;
+       static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
+
+       if (!card->pcie.supports_fw_dump)
+               return;
+
+       for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
+               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
+
+       do_gettimeofday(&t);
+       dev_info(adapter->dev, "== mwifiex firmware dump start: %u.%06u ==\n",
+                (u32)t.tv_sec, (u32)t.tv_usec);
+
+       /* Read the number of the memories which will dump */
+       stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+       if (stat == RDWR_STATUS_FAILURE)
+               goto done;
+
+       reg = creg->fw_dump_start;
+       mwifiex_read_reg_byte(adapter, reg, &dump_num);
+
+       /* Read the length of every memory which will dump */
+       for (idx = 0; idx < dump_num; idx++) {
+               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+               stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+               if (stat == RDWR_STATUS_FAILURE)
+                       goto done;
+
+               memory_size = 0;
+               reg = creg->fw_dump_start;
+               for (i = 0; i < 4; i++) {
+                       mwifiex_read_reg_byte(adapter, reg, &read_reg);
+                       memory_size |= (read_reg << (i * 8));
+                       reg++;
+               }
+
+               if (memory_size == 0) {
+                       dev_info(adapter->dev, "Firmware dump Finished!\n");
+                       break;
+               }
+
+               dev_info(adapter->dev,
+                        "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+               entry->mem_ptr = vmalloc(memory_size + 1);
+               entry->mem_size = memory_size;
+               if (!entry->mem_ptr) {
+                       dev_err(adapter->dev,
+                               "Vmalloc %s failed\n", entry->mem_name);
+                       goto done;
+               }
+               dbg_ptr = entry->mem_ptr;
+               end_ptr = dbg_ptr + memory_size;
+
+               doneflag = entry->done_flag;
+               do_gettimeofday(&t);
+               dev_info(adapter->dev, "Start %s output %u.%06u, please wait...\n",
+                        entry->mem_name, (u32)t.tv_sec, (u32)t.tv_usec);
+
+               do {
+                       stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+                       if (RDWR_STATUS_FAILURE == stat)
+                               goto done;
+
+                       reg_start = creg->fw_dump_start;
+                       reg_end = creg->fw_dump_end;
+                       for (reg = reg_start; reg <= reg_end; reg++) {
+                               mwifiex_read_reg_byte(adapter, reg, dbg_ptr);
+                               if (dbg_ptr < end_ptr)
+                                       dbg_ptr++;
+                               else
+                                       dev_err(adapter->dev,
+                                               "Allocated buf not enough\n");
+                       }
+
+                       if (stat != RDWR_STATUS_DONE)
+                               continue;
+
+                       dev_info(adapter->dev, "%s done: size=0x%tx\n",
+                                entry->mem_name, dbg_ptr - entry->mem_ptr);
+                       break;
+               } while (true);
+       }
+       do_gettimeofday(&t);
+       dev_info(adapter->dev, "== mwifiex firmware dump end: %u.%06u ==\n",
+                (u32)t.tv_sec, (u32)t.tv_usec);
+
+       kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+
+done:
+       adapter->curr_mem_idx = 0;
+}
+
+static void mwifiex_pcie_work(struct work_struct *work)
+{
+       struct mwifiex_adapter *adapter =
+                       container_of(work, struct mwifiex_adapter, iface_work);
+
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+                              &adapter->iface_work_flags))
+               mwifiex_pcie_fw_dump_work(adapter);
+}
+
+/* This function dumps FW information */
+static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+{
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+               return;
+
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
+
+       schedule_work(&adapter->iface_work);
+}
+
 /*
  * This function initializes the PCI-E host memory space, WCB rings, etc.
  *
@@ -2342,6 +2531,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
        adapter->dev = &pdev->dev;
        adapter->tx_buf_size = card->pcie.tx_buf_size;
+       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
        strcpy(adapter->fw_name, card->pcie.firmware);
 
        return 0;
@@ -2394,6 +2585,8 @@ static struct mwifiex_if_ops pcie_ops = {
        .cleanup_mpa_buf =              NULL,
        .init_fw_port =                 mwifiex_pcie_init_fw_port,
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
+       .fw_dump =                      mwifiex_pcie_fw_dump,
+       .iface_work =                   mwifiex_pcie_work,
 };
 
 /*
index e8ec561f8a642495e410793539a0f2b808435cdf..a1a8fd3bc1be5355289b34457cb4ff1cafe894ba 100644 (file)
@@ -3,7 +3,7 @@
  * @brief This file contains definitions for PCI-E interface.
  * driver.
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -129,6 +129,9 @@ struct mwifiex_pcie_card_reg {
        u32 ring_tx_start_ptr;
        u8 pfu_enabled;
        u8 sleep_cookie;
+       u16 fw_dump_ctrl;
+       u16 fw_dump_start;
+       u16 fw_dump_end;
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
@@ -191,6 +194,9 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
        .pfu_enabled = 1,
        .sleep_cookie = 0,
+       .fw_dump_ctrl = 0xcf4,
+       .fw_dump_start = 0xcf8,
+       .fw_dump_end = 0xcff
 };
 
 struct mwifiex_pcie_device {
@@ -198,6 +204,7 @@ struct mwifiex_pcie_device {
        const struct mwifiex_pcie_card_reg *reg;
        u16 blksz_fw_dl;
        u16 tx_buf_size;
+       bool supports_fw_dump;
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
@@ -205,6 +212,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
        .reg            = &mwifiex_reg_8766,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+       .supports_fw_dump = false,
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
@@ -212,6 +220,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .reg            = &mwifiex_reg_8897,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .supports_fw_dump = true,
 };
 
 struct mwifiex_evt_buf_desc {
@@ -322,4 +331,5 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
 
        return 0;
 }
+
 #endif /* _MWIFIEX_PCIE_H */
index 45c5b3450cf5c719886483c9e0853c327fd62811..dee717a19ddb560956175c43789b2c802f029554 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: scan ioctl and command handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 4ce3d7b33991ace2cdd3ba0bcfe50c728e59220d..1da04a086bd955b3e64fd5556bee8e56284c8898 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: SDIO specific handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -50,6 +50,24 @@ static struct mwifiex_if_ops sdio_ops;
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping mem_type_mapping_tbl[] = {
+       {"ITCM", NULL, 0, 0xF0},
+       {"DTCM", NULL, 0, 0xF1},
+       {"SQRAM", NULL, 0, 0xF2},
+       {"APU", NULL, 0, 0xF3},
+       {"CIU", NULL, 0, 0xF4},
+       {"ICU", NULL, 0, 0xF5},
+       {"MAC", NULL, 0, 0xF6},
+       {"EXT7", NULL, 0, 0xF7},
+       {"EXT8", NULL, 0, 0xF8},
+       {"EXT9", NULL, 0, 0xF9},
+       {"EXT10", NULL, 0, 0xFA},
+       {"EXT11", NULL, 0, 0xFB},
+       {"EXT12", NULL, 0, 0xFC},
+       {"EXT13", NULL, 0, 0xFD},
+       {"EXTLAST", NULL, 0, 0xFE},
+};
+
 /*
  * SDIO probe.
  *
@@ -87,6 +105,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->tx_buf_size = data->tx_buf_size;
                card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
                card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
+               card->supports_fw_dump = data->supports_fw_dump;
        }
 
        sdio_claim_host(func);
@@ -179,6 +198,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
+       cancel_work_sync(&adapter->iface_work);
+
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -1777,6 +1798,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->dev = &func->dev;
 
        strcpy(adapter->fw_name, card->firmware);
+       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
 
        return 0;
 }
@@ -1914,10 +1937,10 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                port, card->mp_data_port_mask);
 }
 
-static struct mmc_host *reset_host;
-static void sdio_card_reset_worker(struct work_struct *work)
+static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
-       struct mmc_host *target = reset_host;
+       struct sdio_mmc_card *card = adapter->card;
+       struct mmc_host *target = card->func->card->host;
 
        /* The actual reset operation must be run outside of driver thread.
         * This is because mmc_remove_host() will cause the device to be
@@ -1933,15 +1956,213 @@ static void sdio_card_reset_worker(struct work_struct *work)
        mdelay(20);
        mmc_add_host(target);
 }
-static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
+
+/* This function read/write firmware */
+static enum
+rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
+                                      u8 doneflag)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       int ret, tries;
+       u8 ctrl_data = 0;
+
+       sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
+                   &ret);
+       if (ret) {
+               dev_err(adapter->dev, "SDIO Write ERR\n");
+               return RDWR_STATUS_FAILURE;
+       }
+       for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+               ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
+                                      &ret);
+               if (ret) {
+                       dev_err(adapter->dev, "SDIO read err\n");
+                       return RDWR_STATUS_FAILURE;
+               }
+               if (ctrl_data == FW_DUMP_DONE)
+                       break;
+               if (doneflag && ctrl_data == doneflag)
+                       return RDWR_STATUS_DONE;
+               if (ctrl_data != FW_DUMP_HOST_READY) {
+                       dev_info(adapter->dev,
+                                "The ctrl reg was changed, re-try again!\n");
+                       sdio_writeb(card->func, FW_DUMP_HOST_READY,
+                                   card->reg->fw_dump_ctrl, &ret);
+                       if (ret) {
+                               dev_err(adapter->dev, "SDIO write err\n");
+                               return RDWR_STATUS_FAILURE;
+                       }
+               }
+               usleep_range(100, 200);
+       }
+       if (ctrl_data == FW_DUMP_HOST_READY) {
+               dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+               return RDWR_STATUS_FAILURE;
+       }
+
+       return RDWR_STATUS_SUCCESS;
+}
+
+/* This function dump firmware memory to file */
+static void mwifiex_sdio_fw_dump_work(struct work_struct *work)
+{
+       struct mwifiex_adapter *adapter =
+                       container_of(work, struct mwifiex_adapter, iface_work);
+       struct sdio_mmc_card *card = adapter->card;
+       int ret = 0;
+       unsigned int reg, reg_start, reg_end;
+       u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
+       struct timeval t;
+       enum rdwr_status stat;
+       u32 memory_size;
+       static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
+
+       if (!card->supports_fw_dump)
+               return;
+
+       for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
+               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
+
+       mwifiex_pm_wakeup_card(adapter);
+       sdio_claim_host(card->func);
+
+       do_gettimeofday(&t);
+       dev_info(adapter->dev, "== mwifiex firmware dump start: %u.%06u ==\n",
+                (u32)t.tv_sec, (u32)t.tv_usec);
+
+       stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
+       if (stat == RDWR_STATUS_FAILURE)
+               goto done;
+
+       reg = card->reg->fw_dump_start;
+       /* Read the number of the memories which will dump */
+       dump_num = sdio_readb(card->func, reg, &ret);
+       if (ret) {
+               dev_err(adapter->dev, "SDIO read memory length err\n");
+               goto done;
+       }
+
+       /* Read the length of every memory which will dump */
+       for (idx = 0; idx < dump_num; idx++) {
+               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+
+               stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
+               if (stat == RDWR_STATUS_FAILURE)
+                       goto done;
+
+               memory_size = 0;
+               reg = card->reg->fw_dump_start;
+               for (i = 0; i < 4; i++) {
+                       read_reg = sdio_readb(card->func, reg, &ret);
+                       if (ret) {
+                               dev_err(adapter->dev, "SDIO read err\n");
+                               goto done;
+                       }
+                       memory_size |= (read_reg << i*8);
+                       reg++;
+               }
+
+               if (memory_size == 0) {
+                       dev_info(adapter->dev, "Firmware dump Finished!\n");
+                       break;
+               }
+
+               dev_info(adapter->dev,
+                        "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+               entry->mem_ptr = vmalloc(memory_size + 1);
+               entry->mem_size = memory_size;
+               if (!entry->mem_ptr) {
+                       dev_err(adapter->dev, "Vmalloc %s failed\n",
+                               entry->mem_name);
+                       goto done;
+               }
+               dbg_ptr = entry->mem_ptr;
+               end_ptr = dbg_ptr + memory_size;
+
+               doneflag = entry->done_flag;
+               do_gettimeofday(&t);
+               dev_info(adapter->dev, "Start %s output %u.%06u, please wait...\n",
+                        entry->mem_name, (u32)t.tv_sec, (u32)t.tv_usec);
+
+               do {
+                       stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
+                       if (stat == RDWR_STATUS_FAILURE)
+                               goto done;
+
+                       reg_start = card->reg->fw_dump_start;
+                       reg_end = card->reg->fw_dump_end;
+                       for (reg = reg_start; reg <= reg_end; reg++) {
+                               *dbg_ptr = sdio_readb(card->func, reg, &ret);
+                               if (ret) {
+                                       dev_err(adapter->dev,
+                                               "SDIO read err\n");
+                                       goto done;
+                               }
+                               if (dbg_ptr < end_ptr)
+                                       dbg_ptr++;
+                               else
+                                       dev_err(adapter->dev,
+                                               "Allocated buf not enough\n");
+                       }
+
+                       if (stat != RDWR_STATUS_DONE)
+                               continue;
+
+                       dev_info(adapter->dev, "%s done: size=0x%tx\n",
+                                entry->mem_name, dbg_ptr - entry->mem_ptr);
+                       break;
+               } while (1);
+       }
+       do_gettimeofday(&t);
+       dev_info(adapter->dev, "== mwifiex firmware dump end: %u.%06u ==\n",
+                (u32)t.tv_sec, (u32)t.tv_usec);
+
+       kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+
+done:
+       sdio_release_host(card->func);
+       adapter->curr_mem_idx = 0;
+}
+
+static void mwifiex_sdio_work(struct work_struct *work)
+{
+       struct mwifiex_adapter *adapter =
+                       container_of(work, struct mwifiex_adapter, iface_work);
+
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
+                              &adapter->iface_work_flags))
+               mwifiex_sdio_card_reset_work(adapter);
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+                              &adapter->iface_work_flags))
+               mwifiex_sdio_fw_dump_work(work);
+}
 
 /* This function resets the card */
 static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 {
-       struct sdio_mmc_card *card = adapter->card;
+       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags))
+               return;
+
+       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags);
+
+       schedule_work(&adapter->iface_work);
+}
+
+/* This function dumps FW information */
+static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
+{
+       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags))
+               return;
 
-       reset_host = card->func->card->host;
-       schedule_work(&card_reset_work);
+       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags);
+       schedule_work(&adapter->iface_work);
 }
 
 static struct mwifiex_if_ops sdio_ops = {
@@ -1964,6 +2185,8 @@ static struct mwifiex_if_ops sdio_ops = {
        .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
        .event_complete = mwifiex_sdio_event_complete,
        .card_reset = mwifiex_sdio_card_reset,
+       .iface_work = mwifiex_sdio_work,
+       .fw_dump = mwifiex_sdio_fw_dump,
 };
 
 /*
@@ -2001,7 +2224,6 @@ mwifiex_sdio_cleanup_module(void)
        /* Set the flag as user is removing this module. */
        user_rmmod = 1;
 
-       cancel_work_sync(&card_reset_work);
        sdio_unregister_driver(&mwifiex_sdio);
 }
 
index 6eea30b43ed714f3bd81f9453a5008001a11b33d..6b8835ec88f1a802dc9cbabf4efdb38f23b7973c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: SDIO specific definitions
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -219,6 +219,9 @@ struct mwifiex_sdio_card_reg {
        u8 rd_len_p0_l;
        u8 rd_len_p0_u;
        u8 card_misc_cfg_reg;
+       u8 fw_dump_ctrl;
+       u8 fw_dump_start;
+       u8 fw_dump_end;
 };
 
 struct sdio_mmc_card {
@@ -231,6 +234,7 @@ struct sdio_mmc_card {
        u8 mp_agg_pkt_limit;
        bool supports_sdio_new_mode;
        bool has_control_mask;
+       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
@@ -257,6 +261,7 @@ struct mwifiex_sdio_device {
        u8 mp_agg_pkt_limit;
        bool supports_sdio_new_mode;
        bool has_control_mask;
+       bool supports_fw_dump;
        u16 tx_buf_size;
        u32 mp_tx_agg_buf_size;
        u32 mp_rx_agg_buf_size;
@@ -307,6 +312,9 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
        .rd_len_p0_l = 0x0c,
        .rd_len_p0_u = 0x0d,
        .card_misc_cfg_reg = 0xcc,
+       .fw_dump_ctrl = 0xe2,
+       .fw_dump_start = 0xe3,
+       .fw_dump_end = 0xea,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
@@ -319,6 +327,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .supports_fw_dump = false,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -331,6 +340,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .supports_fw_dump = false,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -343,6 +353,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+       .supports_fw_dump = false,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -355,6 +366,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
        .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
        .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+       .supports_fw_dump = true,
 };
 
 /*
index 88202ce0c13965fdff679c506a3772ef287e6d54..0f077aaadab6be43441f3f5be535490e2059b72e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: station command handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 577f2979ed8f2bcacbacc6af6ce9af03d3f86137..822357b7b0bbb04818192457cab3df6e682ffb10 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: station command response handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index f6395ef11a721b8fc6d8ee797fb34a72b7c2f43d..f1c240eca0cdae2e6364f67e0aab150497afed2b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: station event handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 536c14aa71f39cb0e4f73417429fca55258af5ed..1a03d4d8b418453e52ed3618501c3c64da66db52 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: functions for station ioctl
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -26,7 +26,7 @@
 #include "11n.h"
 #include "cfg80211.h"
 
-static int disconnect_on_suspend = 1;
+static int disconnect_on_suspend;
 module_param(disconnect_on_suspend, int, 0644);
 
 /*
@@ -389,8 +389,8 @@ done:
  * This function prepares the correct firmware command and
  * issues it.
  */
-static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
-                                int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
+int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
+                         int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg)
 
 {
        struct mwifiex_adapter *adapter = priv->adapter;
index 8b639d7fe6df263814901554a2c43363be6791e9..9ceb1dbe34c532fdae40952a167675d76bdea9aa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: station RX data handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 70eb863c724974f94f16f4ea09b1b11b568f7803..dab7b33c54bed0d2849ba2b275b103bf0925ba1e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: station TX data handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 0e88364e0c670a5fe59fccdec3d711d679bf7be3..a414161c6064d783ed482c087ff7d7f1a10c013e 100644 (file)
@@ -530,7 +530,6 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
 {
        struct sk_buff *skb;
        struct mwifiex_txinfo *tx_info;
-       struct timeval tv;
        int ret;
        u16 skb_len;
 
@@ -609,8 +608,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
 
-       do_gettimeofday(&tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       __net_timestamp(skb);
        mwifiex_queue_tx_pkt(priv, skb);
 
        return 0;
@@ -703,7 +701,6 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
 {
        struct sk_buff *skb;
        struct mwifiex_txinfo *tx_info;
-       struct timeval tv;
        u8 *pos;
        u32 pkt_type, tx_control;
        u16 pkt_len, skb_len;
@@ -769,8 +766,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
        pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
        memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
               sizeof(pkt_len));
-       do_gettimeofday(&tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       __net_timestamp(skb);
        mwifiex_queue_tx_pkt(priv, skb);
 
        return 0;
index fd7e5b9b4581fa5d44ea60a3476e45aa3054e045..96a2126cc44bf81da15f8c17d6dc7895cad5abce 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: generic TX/RX data handling
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 32643555dd2a32a302d1301427e463d877c8260a..300bab4380117076dadaf51b4eaa516b7f799044 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: AP specific command handling
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 92e77a398ecfcaa0f7bf3493864f4823eb30394d..7c2b97660a032ccdb424b5783e38c0680f5599c3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: AP event handling
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index b0601b91cc4f1310b76f519e5ae3651ebb8fe1c5..ec7309d096abaf5a11845eea72659374e318fce1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: AP TX and RX data handling
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -96,7 +96,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        struct sk_buff *new_skb;
        struct mwifiex_txinfo *tx_info;
        int hdr_chop;
-       struct timeval tv;
        struct ethhdr *p_ethhdr;
 
        uap_rx_pd = (struct uap_rxpd *)(skb->data);
@@ -193,8 +192,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
                tx_info->pkt_len = skb->len;
        }
 
-       do_gettimeofday(&tv);
-       skb->tstamp = timeval_to_ktime(tv);
+       __net_timestamp(skb);
        mwifiex_wmm_add_buf_txqueue(priv, skb);
        atomic_inc(&adapter->tx_pending);
        atomic_inc(&adapter->pending_bridged_pkts);
index a8ce8130cfaeeda08a2a08f7b693540fe79d9f85..7118a18b91ba9f2a98a4333a2a5070f00d17a433 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: USB specific handling
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 15b73d12e9983dc98019dd739e80f38ffa66aedd..4c41c2a193c553106f61010574a0e656c49cfc2f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file contains definitions for mwifiex USB interface driver.
  *
- * Copyright (C) 2012, Marvell International Ltd.
+ * Copyright (C) 2012-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 6da5abf52e61a4360b236411b09310205e8316ea..cee028321a9ab73eac65ced1048e21fed2ce8e7e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: utility functions
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index caadb3737b9ebb6a877d707d9be44533c8643063..40296cb4a3f126c78a57369c03841942e3d4bb9c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: utility functions
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index d3671d009f6c3c89c4f6e91e06220edd2c481da9..94c98a86ebbec84bc83fe8c6c9f3e879c09df627 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: WMM
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -878,15 +878,8 @@ u8
 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
                                  const struct sk_buff *skb)
 {
+       u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
        u8 ret_val;
-       struct timeval out_tstamp, in_tstamp;
-       u32 queue_delay;
-
-       do_gettimeofday(&out_tstamp);
-       in_tstamp = ktime_to_timeval(skb->tstamp);
-
-       queue_delay = (out_tstamp.tv_sec - in_tstamp.tv_sec) * 1000;
-       queue_delay += (out_tstamp.tv_usec - in_tstamp.tv_usec) / 1000;
 
        /*
         * Queue delay is passed as a uint8 in units of 2ms (ms shifted
index eca56e371a57bb5afb2df6165c24fbbc4d34e561..569bd73f33c5f001f93241fe1c81b40e172230e0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Marvell Wireless LAN device driver: WMM
  *
- * Copyright (C) 2011, Marvell International Ltd.
+ * Copyright (C) 2011-2014, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
index 3c0a0a86ba12038056dee4bbc37d12cb50ae9868..9a3d4d6724f70324328f8d00f4bc52159fc76622 100644 (file)
@@ -1633,22 +1633,17 @@ static int mwl8k_tid_queue_mapping(u8 tid)
        case 0:
        case 3:
                return IEEE80211_AC_BE;
-               break;
        case 1:
        case 2:
                return IEEE80211_AC_BK;
-               break;
        case 4:
        case 5:
                return IEEE80211_AC_VI;
-               break;
        case 6:
        case 7:
                return IEEE80211_AC_VO;
-               break;
        default:
                return -1;
-               break;
        }
 }
 
index 60819bcf437735fd43a7f03d14ba988623e8e9e6..60698b02085190a1147f8bb96e1a21060be2295b 100644 (file)
@@ -107,7 +107,7 @@ config PCI_HERMES
 
 config PCMCIA_HERMES
        tristate "Hermes PCMCIA card support"
-       depends on PCMCIA && HERMES
+       depends on PCMCIA && HERMES && HAS_IOPORT_MAP
        ---help---
          A driver for "Hermes" chipset based PCMCIA wireless adaptors, such
          as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/
@@ -122,7 +122,7 @@ config PCMCIA_HERMES
 
 config PCMCIA_SPECTRUM
        tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
-       depends on PCMCIA && HERMES
+       depends on PCMCIA && HERMES && HAS_IOPORT_MAP
        ---help---
 
          This is a driver for 802.11b cards using RAM-loadable Symbol
index c90939ced0e483ae04c490c60a5f18eabbea1b9f..d3cf7c3ebfd656d4a592384f41fd348797ed17c8 100644 (file)
@@ -921,7 +921,6 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
                        retval = -EFAULT;
                }
                goto exit;
-               break;
        }
        if (ctx->in_rid) {
                struct ezusb_packet *ans = ctx->buf;
index de15171e2cd896f8dfd78c6d5e00966e2161ec12..63de5eed25cf9309f6f86a869481632b59ec892d 100644 (file)
@@ -193,7 +193,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
        /* allow users to customize their eeprom.
         */
 
-       ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
+       ret = request_firmware_direct(&eeprom, "3826.eeprom", &priv->spi->dev);
        if (ret < 0) {
 #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
                dev_info(&priv->spi->dev, "loading default eeprom...\n");
index 47b34bfe890ae97387e6dc3f03171d4de2e8435c..3a8d2dbcfecdc870ffae07e64b24e15cb9330543 100644 (file)
@@ -793,7 +793,6 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
        switch (isl_oid[n].flags & OID_FLAG_TYPE) {
        case OID_TYPE_U32:
                return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
-               break;
        case OID_TYPE_BUFFER:{
                        struct obj_buffer *buff = r->ptr;
                        return snprintf(str, PRIV_STR_SIZE,
index cf61d6e3eaa7cd746a7bec20853277b5f1c69001..f3d3995d8f6b4c8ada23a5f25c6ad8035b108970 100644 (file)
@@ -76,6 +76,52 @@ static bool rsi_recalculate_weights(struct rsi_common *common)
        return recontend_queue;
 }
 
+/**
+ * rsi_get_num_pkts_dequeue() - This function determines the number of
+ *                             packets to be dequeued based on the number
+ *                             of bytes calculated using txop.
+ *
+ * @common: Pointer to the driver private structure.
+ * @q_num: the queue from which pkts have to be dequeued
+ *
+ * Return: pkt_num: Number of pkts to be dequeued.
+ */
+static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct sk_buff *skb;
+       u32 pkt_cnt = 0;
+       s16 txop = common->tx_qinfo[q_num].txop * 32;
+       __le16 r_txop;
+       struct ieee80211_rate rate;
+
+       rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
+       if (q_num == VI_Q)
+               txop = ((txop << 5) / 80);
+
+       if (skb_queue_len(&common->tx_queue[q_num]))
+               skb = skb_peek(&common->tx_queue[q_num]);
+       else
+               return 0;
+
+       do {
+               r_txop = ieee80211_generic_frame_duration(adapter->hw,
+                                                         adapter->vifs[0],
+                                                         common->band,
+                                                         skb->len, &rate);
+               txop -= le16_to_cpu(r_txop);
+               pkt_cnt += 1;
+               /*checking if pkts are still there*/
+               if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
+                       skb = skb->next;
+               else
+                       break;
+
+       } while (txop > 0);
+
+       return pkt_cnt;
+}
+
 /**
  * rsi_core_determine_hal_queue() - This function determines the queue from
  *                                 which packet has to be dequeued.
@@ -88,7 +134,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
        bool recontend_queue = false;
        u32 q_len = 0;
        u8 q_num = INVALID_QUEUE;
-       u8 ii = 0, min = 0;
+       u8 ii = 0;
 
        if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
                if (!common->mgmt_q_block)
@@ -96,6 +142,9 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
                return q_num;
        }
 
+       if (common->hw_data_qs_blocked)
+               return q_num;
+
        if (common->pkt_cnt != 0) {
                --common->pkt_cnt;
                return common->selected_qnum;
@@ -106,14 +155,15 @@ get_queue_num:
 
        q_num = rsi_determine_min_weight_queue(common);
 
-       q_len = skb_queue_len(&common->tx_queue[ii]);
        ii = q_num;
 
        /* Selecting the queue with least back off */
        for (; ii < NUM_EDCA_QUEUES; ii++) {
+               q_len = skb_queue_len(&common->tx_queue[ii]);
                if (((common->tx_qinfo[ii].pkt_contended) &&
-                    (common->tx_qinfo[ii].weight < min)) && q_len) {
-                       min = common->tx_qinfo[ii].weight;
+                    (common->tx_qinfo[ii].weight < common->min_weight)) &&
+                     q_len) {
+                       common->min_weight = common->tx_qinfo[ii].weight;
                        q_num = ii;
                }
        }
@@ -140,25 +190,9 @@ get_queue_num:
        common->selected_qnum = q_num;
        q_len = skb_queue_len(&common->tx_queue[q_num]);
 
-       switch (common->selected_qnum) {
-       case VO_Q:
-               if (q_len > MAX_CONTINUOUS_VO_PKTS)
-                       common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
-               else
-                       common->pkt_cnt = --q_len;
-               break;
-
-       case VI_Q:
-               if (q_len > MAX_CONTINUOUS_VI_PKTS)
-                       common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
-               else
-                       common->pkt_cnt = --q_len;
-
-               break;
-
-       default:
-               common->pkt_cnt = 0;
-               break;
+       if (q_num == VO_Q || q_num == VI_Q) {
+               common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
+               common->pkt_cnt -= 1;
        }
 
        return q_num;
@@ -252,6 +286,7 @@ void rsi_core_qos_processor(struct rsi_common *common)
 
                skb = rsi_core_dequeue_pkt(common, q_num);
                if (skb == NULL) {
+                       rsi_dbg(ERR_ZONE, "skb null\n");
                        mutex_unlock(&common->tx_rxlock);
                        break;
                }
@@ -306,7 +341,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
        }
 
        if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
-           (ieee80211_is_ctl(tmp_hdr->frame_control))) {
+           (ieee80211_is_ctl(tmp_hdr->frame_control)) ||
+           (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) {
                q_num = MGMT_SOFT_Q;
                skb->priority = q_num;
        } else {
@@ -325,6 +361,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
        if ((q_num != MGMT_SOFT_Q) &&
            ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
             DATA_QUEUE_WATER_MARK)) {
+               rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
                if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
                        ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
                rsi_set_event(&common->tx_thread.event);
index c466246a323f3118d158b22df5c389bdb46e759b..828a042f903f19fef82eb09425f3e865e534f551 100644 (file)
@@ -145,7 +145,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data)
        seq_printf(seq, "total_mgmt_pkt_send : %d\n",
                   common->tx_stats.total_tx_pkt_send[MGMT_SOFT_Q]);
        seq_printf(seq, "total_mgmt_pkt_queued : %d\n",
-                  skb_queue_len(&common->tx_queue[4]));
+                  skb_queue_len(&common->tx_queue[MGMT_SOFT_Q]));
        seq_printf(seq, "total_mgmt_pkt_freed  : %d\n",
                   common->tx_stats.total_tx_pkt_freed[MGMT_SOFT_Q]);
 
@@ -153,25 +153,25 @@ static int rsi_stats_read(struct seq_file *seq, void *data)
        seq_printf(seq, "total_data_vo_pkt_send: %8d\t",
                   common->tx_stats.total_tx_pkt_send[VO_Q]);
        seq_printf(seq, "total_data_vo_pkt_queued:  %8d\t",
-                  skb_queue_len(&common->tx_queue[0]));
+                  skb_queue_len(&common->tx_queue[VO_Q]));
        seq_printf(seq, "total_vo_pkt_freed: %8d\n",
                   common->tx_stats.total_tx_pkt_freed[VO_Q]);
        seq_printf(seq, "total_data_vi_pkt_send: %8d\t",
                   common->tx_stats.total_tx_pkt_send[VI_Q]);
        seq_printf(seq, "total_data_vi_pkt_queued:  %8d\t",
-                  skb_queue_len(&common->tx_queue[1]));
+                  skb_queue_len(&common->tx_queue[VI_Q]));
        seq_printf(seq, "total_vi_pkt_freed: %8d\n",
                   common->tx_stats.total_tx_pkt_freed[VI_Q]);
        seq_printf(seq,  "total_data_be_pkt_send: %8d\t",
                   common->tx_stats.total_tx_pkt_send[BE_Q]);
        seq_printf(seq, "total_data_be_pkt_queued:  %8d\t",
-                  skb_queue_len(&common->tx_queue[2]));
+                  skb_queue_len(&common->tx_queue[BE_Q]));
        seq_printf(seq, "total_be_pkt_freed: %8d\n",
                   common->tx_stats.total_tx_pkt_freed[BE_Q]);
        seq_printf(seq, "total_data_bk_pkt_send: %8d\t",
                   common->tx_stats.total_tx_pkt_send[BK_Q]);
        seq_printf(seq, "total_data_bk_pkt_queued:  %8d\t",
-                  skb_queue_len(&common->tx_queue[3]));
+                  skb_queue_len(&common->tx_queue[BK_Q]));
        seq_printf(seq, "total_bk_pkt_freed: %8d\n",
                   common->tx_stats.total_tx_pkt_freed[BK_Q]);
 
index 54aaeb09debf568c9dcad47ae3892153c2d827d6..aeaf87bb551841a607b9884aeed992513e0001a0 100644 (file)
@@ -177,7 +177,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
        sbands->ht_cap.cap = (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                              IEEE80211_HT_CAP_SGI_20 |
                              IEEE80211_HT_CAP_SGI_40);
-       sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
+       sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
        sbands->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
        sbands->ht_cap.mcs.rx_mask[0] = 0xff;
        sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
@@ -185,7 +185,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
 }
 
 /**
- * rsi_mac80211_attach() - This function is used to de-initialize the
+ * rsi_mac80211_detach() - This function is used to de-initialize the
  *                        Mac80211 stack.
  * @adapter: Pointer to the adapter structure.
  *
@@ -340,6 +340,59 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
        mutex_unlock(&common->mutex);
 }
 
+/**
+ * rsi_channel_change() - This function is a performs the checks
+ *                       required for changing a channel and sets
+ *                       the channel accordingly.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_channel_change(struct ieee80211_hw *hw)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       int status = -EOPNOTSUPP;
+       struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+       u16 channel = curchan->hw_value;
+       struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf;
+
+       rsi_dbg(INFO_ZONE,
+               "%s: Set channel: %d MHz type: %d channel_no %d\n",
+               __func__, curchan->center_freq,
+               curchan->flags, channel);
+
+       if (bss->assoc) {
+               if (!common->hw_data_qs_blocked &&
+                   (rsi_get_connected_channel(adapter) != channel)) {
+                       rsi_dbg(INFO_ZONE, "blk data q %d\n", channel);
+                       if (!rsi_send_block_unblock_frame(common, true))
+                               common->hw_data_qs_blocked = true;
+               }
+       }
+
+       status = rsi_band_check(common);
+       if (!status)
+               status = rsi_set_channel(adapter->priv, channel);
+
+       if (bss->assoc) {
+               if (common->hw_data_qs_blocked &&
+                   (rsi_get_connected_channel(adapter) == channel)) {
+                       rsi_dbg(INFO_ZONE, "unblk data q %d\n", channel);
+                       if (!rsi_send_block_unblock_frame(common, false))
+                               common->hw_data_qs_blocked = false;
+               }
+       } else {
+               if (common->hw_data_qs_blocked) {
+                       rsi_dbg(INFO_ZONE, "unblk data q %d\n", channel);
+                       if (!rsi_send_block_unblock_frame(common, false))
+                               common->hw_data_qs_blocked = false;
+               }
+       }
+
+       return status;
+}
+
 /**
  * rsi_mac80211_config() - This function is a handler for configuration
  *                        requests. The stack calls this function to
@@ -357,17 +410,10 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
        int status = -EOPNOTSUPP;
 
        mutex_lock(&common->mutex);
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               struct ieee80211_channel *curchan = hw->conf.chandef.chan;
-               u16 channel = curchan->hw_value;
-
-               rsi_dbg(INFO_ZONE,
-                       "%s: Set channel: %d MHz type: %d channel_no %d\n",
-                       __func__, curchan->center_freq,
-                       curchan->flags, channel);
-               common->band = curchan->band;
-               status = rsi_set_channel(adapter->priv, channel);
-       }
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+               status = rsi_channel_change(hw);
+
        mutex_unlock(&common->mutex);
 
        return status;
@@ -421,6 +467,15 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
                                      bss_conf->qos,
                                      bss_conf->aid);
        }
+
+       if (changed & BSS_CHANGED_CQM) {
+               common->cqm_info.last_cqm_event_rssi = 0;
+               common->cqm_info.rssi_thold = bss_conf->cqm_rssi_thold;
+               common->cqm_info.rssi_hyst = bss_conf->cqm_rssi_hyst;
+               rsi_dbg(INFO_ZONE, "RSSI throld & hysteresis are: %d %d\n",
+                       common->cqm_info.rssi_thold,
+                       common->cqm_info.rssi_hyst);
+       }
        mutex_unlock(&common->mutex);
 }
 
@@ -723,23 +778,54 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
 {
        struct rsi_hw *adapter = hw->priv;
        struct rsi_common *common = adapter->priv;
+       enum ieee80211_band band = hw->conf.chandef.chan->band;
 
        mutex_lock(&common->mutex);
+       common->fixedrate_mask[band] = 0;
 
-       common->fixedrate_mask[IEEE80211_BAND_2GHZ] = 0;
-
-       if (mask->control[IEEE80211_BAND_2GHZ].legacy == 0xfff) {
-               common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
-                       (mask->control[IEEE80211_BAND_2GHZ].ht_mcs[0] << 12);
+       if (mask->control[band].legacy == 0xfff) {
+               common->fixedrate_mask[band] =
+                       (mask->control[band].ht_mcs[0] << 12);
        } else {
-               common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
-                       mask->control[IEEE80211_BAND_2GHZ].legacy;
+               common->fixedrate_mask[band] =
+                       mask->control[band].legacy;
        }
        mutex_unlock(&common->mutex);
 
        return 0;
 }
 
+/**
+ * rsi_perform_cqm() - This function performs cqm.
+ * @common: Pointer to the driver private structure.
+ * @bssid: pointer to the bssid.
+ * @rssi: RSSI value.
+ */
+static void rsi_perform_cqm(struct rsi_common *common,
+                           u8 *bssid,
+                           s8 rssi)
+{
+       struct rsi_hw *adapter = common->priv;
+       s8 last_event = common->cqm_info.last_cqm_event_rssi;
+       int thold = common->cqm_info.rssi_thold;
+       u32 hyst = common->cqm_info.rssi_hyst;
+       enum nl80211_cqm_rssi_threshold_event event;
+
+       if (rssi < thold && (last_event == 0 || rssi < (last_event - hyst)))
+               event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+       else if (rssi > thold &&
+                (last_event == 0 || rssi > (last_event + hyst)))
+               event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+       else
+               return;
+
+       common->cqm_info.last_cqm_event_rssi = rssi;
+       rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event);
+       ieee80211_cqm_rssi_notify(adapter->vifs[0], event, GFP_KERNEL);
+
+       return;
+}
+
 /**
  * rsi_fill_rx_status() - This function fills rx status in
  *                       ieee80211_rx_status structure.
@@ -755,6 +841,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
                               struct rsi_common *common,
                               struct ieee80211_rx_status *rxs)
 {
+       struct ieee80211_bss_conf *bss = &common->priv->vifs[0]->bss_conf;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct skb_info *rx_params = (struct skb_info *)info->driver_data;
        struct ieee80211_hdr *hdr;
@@ -770,10 +857,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
 
        rxs->signal = -(rssi);
 
-       if (channel <= 14)
-               rxs->band = IEEE80211_BAND_2GHZ;
-       else
-               rxs->band = IEEE80211_BAND_5GHZ;
+       rxs->band = common->band;
 
        freq = ieee80211_channel_to_frequency(channel, rxs->band);
 
@@ -792,6 +876,14 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw,
                rxs->flag |= RX_FLAG_DECRYPTED;
                rxs->flag |= RX_FLAG_IV_STRIPPED;
        }
+
+       /* CQM only for connected AP beacons, the RSSI is a weighted avg */
+       if (bss->assoc && !(memcmp(bss->bssid, hdr->addr2, ETH_ALEN))) {
+               if (ieee80211_is_beacon(hdr->frame_control))
+                       rsi_perform_cqm(common, hdr->addr2, rxs->signal);
+       }
+
+       return;
 }
 
 /**
@@ -983,6 +1075,7 @@ int rsi_mac80211_attach(struct rsi_common *common)
 
        hw->max_tx_aggregation_subframes = 6;
        rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
+       rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
        hw->rate_control_algorithm = "AARF";
 
        SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1000,6 +1093,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
        wiphy->available_antennas_tx = 1;
        wiphy->bands[IEEE80211_BAND_2GHZ] =
                &adapter->sbands[IEEE80211_BAND_2GHZ];
+       wiphy->bands[IEEE80211_BAND_5GHZ] =
+               &adapter->sbands[IEEE80211_BAND_5GHZ];
 
        status = ieee80211_register_hw(hw);
        if (status)
index 2eefbf159bc0d0abdcead9ff1caf6b867d95793f..8d110fd9eba1c95d4b1537c26dff1853c7407a42 100644 (file)
@@ -217,6 +217,7 @@ static void rsi_set_default_parameters(struct rsi_common *common)
        common->min_rate = 0xffff;
        common->fsm_state = FSM_CARD_NOT_READY;
        common->iface_down = true;
+       common->endpoint = EP_2GHZ_20MHZ;
 }
 
 /**
@@ -276,7 +277,6 @@ static int rsi_load_radio_caps(struct rsi_common *common)
 {
        struct rsi_radio_caps *radio_caps;
        struct rsi_hw *adapter = common->priv;
-       struct ieee80211_hw *hw = adapter->hw;
        u16 inx = 0;
        u8 ii;
        u8 radio_id = 0;
@@ -285,7 +285,6 @@ static int rsi_load_radio_caps(struct rsi_common *common)
                      0xf0, 0xf0, 0xf0, 0xf0,
                      0xf0, 0xf0, 0xf0, 0xf0,
                      0xf0, 0xf0, 0xf0, 0xf0};
-       struct ieee80211_conf *conf = &hw->conf;
        struct sk_buff *skb;
 
        rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__);
@@ -307,29 +306,36 @@ static int rsi_load_radio_caps(struct rsi_common *common)
        if (common->channel_width == BW_40MHZ) {
                radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ);
                radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ);
-               if (common->channel_width) {
-                       radio_caps->desc_word[5] =
-                               cpu_to_le16(common->channel_width << 12);
-                       radio_caps->desc_word[5] |= cpu_to_le16(FULL40M_ENABLE);
-               }
 
-               if (conf_is_ht40_minus(conf)) {
-                       radio_caps->desc_word[5] = 0;
-                       radio_caps->desc_word[5] |=
-                               cpu_to_le16(LOWER_20_ENABLE);
-                       radio_caps->desc_word[5] |=
-                               cpu_to_le16(LOWER_20_ENABLE >> 12);
-               }
-
-               if (conf_is_ht40_plus(conf)) {
-                       radio_caps->desc_word[5] = 0;
-                       radio_caps->desc_word[5] |=
-                               cpu_to_le16(UPPER_20_ENABLE);
-                       radio_caps->desc_word[5] |=
-                               cpu_to_le16(UPPER_20_ENABLE >> 12);
+               if (common->fsm_state == FSM_MAC_INIT_DONE) {
+                       struct ieee80211_hw *hw = adapter->hw;
+                       struct ieee80211_conf *conf = &hw->conf;
+                       if (conf_is_ht40_plus(conf)) {
+                               radio_caps->desc_word[5] =
+                                       cpu_to_le16(LOWER_20_ENABLE);
+                               radio_caps->desc_word[5] |=
+                                       cpu_to_le16(LOWER_20_ENABLE >> 12);
+                       } else if (conf_is_ht40_minus(conf)) {
+                               radio_caps->desc_word[5] =
+                                       cpu_to_le16(UPPER_20_ENABLE);
+                               radio_caps->desc_word[5] |=
+                                       cpu_to_le16(UPPER_20_ENABLE >> 12);
+                       } else {
+                               radio_caps->desc_word[5] =
+                                       cpu_to_le16(BW_40MHZ << 12);
+                               radio_caps->desc_word[5] |=
+                                       cpu_to_le16(FULL40M_ENABLE);
+                       }
                }
        }
 
+       radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE);
+       radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE);
+       radio_caps->slot_rx_11n = cpu_to_le16(SHORT_SLOT_VALUE);
+       radio_caps->ofdm_ack_tout = cpu_to_le16(OFDM_ACK_TOUT_VALUE);
+       radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE);
+       radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE);
+
        radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8);
 
        for (ii = 0; ii < MAX_HW_QUEUES; ii++) {
@@ -588,7 +594,7 @@ static int rsi_program_bb_rf(struct rsi_common *common)
 
        mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
        mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA);
-       mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint << 8);
+       mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint);
 
        if (common->rf_reset) {
                mgmt_frame->desc_word[7] =  cpu_to_le16(RF_RESET_ENABLE);
@@ -615,6 +621,9 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
 {
        struct sk_buff *skb = NULL;
        struct rsi_vap_caps *vap_caps;
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hw *hw = adapter->hw;
+       struct ieee80211_conf *conf = &hw->conf;
        u16 vap_id = 0;
 
        rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__);
@@ -644,13 +653,24 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
        vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD);
 
        vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
-       vap_caps->default_mgmt_rate = 0;
-       if (conf_is_ht40(&common->priv->hw->conf)) {
-               vap_caps->default_ctrl_rate =
-                               cpu_to_le32(RSI_RATE_6 | FULL40M_ENABLE << 16);
-       } else {
+       vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);
+
+       if (common->band == IEEE80211_BAND_5GHZ) {
                vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
+               if (conf_is_ht40(&common->priv->hw->conf)) {
+                       vap_caps->default_ctrl_rate |=
+                               cpu_to_le32(FULL40M_ENABLE << 16);
+               }
+       } else {
+               vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_1);
+               if (conf_is_ht40_minus(conf))
+                       vap_caps->default_ctrl_rate |=
+                               cpu_to_le32(UPPER_20_ENABLE << 16);
+               else if (conf_is_ht40_plus(conf))
+                       vap_caps->default_ctrl_rate |=
+                               cpu_to_le32(LOWER_20_ENABLE << 16);
        }
+
        vap_caps->default_data_rate = 0;
        vap_caps->beacon_interval = cpu_to_le16(200);
        vap_caps->dtim_period = cpu_to_le16(4);
@@ -826,6 +846,63 @@ static int rsi_send_reset_mac(struct rsi_common *common)
        return rsi_send_internal_mgmt_frame(common, skb);
 }
 
+/**
+ * rsi_band_check() - This function programs the band
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+int rsi_band_check(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hw *hw = adapter->hw;
+       u8 prev_bw = common->channel_width;
+       u8 prev_ep = common->endpoint;
+       struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+       int status = 0;
+
+       if (common->band != curchan->band) {
+               common->rf_reset = 1;
+               common->band = curchan->band;
+       }
+
+       if ((hw->conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT) ||
+           (hw->conf.chandef.width == NL80211_CHAN_WIDTH_20))
+               common->channel_width = BW_20MHZ;
+       else
+               common->channel_width = BW_40MHZ;
+
+       if (common->band == IEEE80211_BAND_2GHZ) {
+               if (common->channel_width)
+                       common->endpoint = EP_2GHZ_40MHZ;
+               else
+                       common->endpoint = EP_2GHZ_20MHZ;
+       } else {
+               if (common->channel_width)
+                       common->endpoint = EP_5GHZ_40MHZ;
+               else
+                       common->endpoint = EP_5GHZ_20MHZ;
+       }
+
+       if (common->endpoint != prev_ep) {
+               status = rsi_program_bb_rf(common);
+               if (status)
+                       return status;
+       }
+
+       if (common->channel_width != prev_bw) {
+               status = rsi_load_bootup_params(common);
+               if (status)
+                       return status;
+
+               status = rsi_load_radio_caps(common);
+               if (status)
+                       return status;
+       }
+
+       return status;
+}
+
 /**
  * rsi_set_channel() - This function programs the channel.
  * @common: Pointer to the driver private structure.
@@ -841,23 +918,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
        rsi_dbg(MGMT_TX_ZONE,
                "%s: Sending scan req frame\n", __func__);
 
-       if (common->band == IEEE80211_BAND_5GHZ) {
-               if ((channel >= 36) && (channel <= 64))
-                       channel = ((channel - 32) / 4);
-               else if ((channel > 64) && (channel <= 140))
-                       channel = ((channel - 102) / 4) + 8;
-               else if (channel >= 149)
-                       channel = ((channel - 151) / 4) + 18;
-               else
-                       return -EINVAL;
-       } else {
-               if (channel > 14) {
-                       rsi_dbg(ERR_ZONE, "%s: Invalid chno %d, band = %d\n",
-                               __func__, channel, common->band);
-                       return -EINVAL;
-               }
-       }
-
        skb = dev_alloc_skb(FRAME_DESC_SZ);
        if (!skb) {
                rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
@@ -877,6 +937,7 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
                                               (RSI_RF_TYPE << 4));
 
        mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
+       mgmt_frame->desc_word[6] = cpu_to_le16(0x12);
 
        if (common->channel_width == BW_40MHZ)
                mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
@@ -950,7 +1011,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
        struct ieee80211_hw *hw = common->priv->hw;
        u8 band = hw->conf.chandef.chan->band;
        u8 num_supported_rates = 0;
-       u8 rate_offset = 0;
+       u8 rate_table_offset, rate_offset = 0;
        u32 rate_bitmap = common->bitrate_mask[band];
 
        u16 *selected_rates, min_rate;
@@ -986,14 +1047,19 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
        if (common->channel_width == BW_40MHZ)
                auto_rate->desc_word[7] |= cpu_to_le16(1);
 
-       if (band == IEEE80211_BAND_2GHZ)
-               min_rate = STD_RATE_01;
-       else
-               min_rate = STD_RATE_06;
+       if (band == IEEE80211_BAND_2GHZ) {
+               min_rate = RSI_RATE_1;
+               rate_table_offset = 0;
+       } else {
+               min_rate = RSI_RATE_6;
+               rate_table_offset = 4;
+       }
 
-       for (ii = 0, jj = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+       for (ii = 0, jj = 0;
+            ii < (ARRAY_SIZE(rsi_rates) - rate_table_offset); ii++) {
                if (rate_bitmap & BIT(ii)) {
-                       selected_rates[jj++] = (rsi_rates[ii].bitrate / 5);
+                       selected_rates[jj++] =
+                       (rsi_rates[ii + rate_table_offset].bitrate / 5);
                        rate_offset++;
                }
        }
@@ -1006,13 +1072,6 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
                rate_offset += ARRAY_SIZE(mcs);
        }
 
-       if (rate_offset < (RSI_TBL_SZ / 2) - 1) {
-               for (ii = jj; ii < (RSI_TBL_SZ / 2); ii++) {
-                       selected_rates[jj++] = min_rate;
-                       rate_offset++;
-               }
-       }
-
        sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
 
        /* mapping the rates to RSI rates */
@@ -1028,25 +1087,25 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
 
        /* loading HT rates in the bottom half of the auto rate table */
        if (common->vif_info[0].is_ht) {
-               if (common->vif_info[0].sgi)
-                       auto_rate->supported_rates[rate_offset++] =
-                               cpu_to_le16(RSI_RATE_MCS7_SG);
-
                for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1;
                     ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) {
-                       if (common->vif_info[0].sgi)
+                       if (common->vif_info[0].sgi ||
+                           conf_is_ht40(&common->priv->hw->conf))
                                auto_rate->supported_rates[ii++] =
                                        cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
                        auto_rate->supported_rates[ii] =
                                cpu_to_le16(rsi_mcsrates[kk--]);
                }
 
-               for (; ii < RSI_TBL_SZ; ii++) {
+               for (; ii < (RSI_TBL_SZ - 1); ii++) {
                        auto_rate->supported_rates[ii] =
                                cpu_to_le16(rsi_mcsrates[0]);
                }
        }
 
+       for (; ii < RSI_TBL_SZ; ii++)
+               auto_rate->supported_rates[ii] = cpu_to_le16(min_rate);
+
        auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2);
        auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2);
        auto_rate->desc_word[7] |= cpu_to_le16(0 << 8);
@@ -1140,6 +1199,49 @@ static int rsi_eeprom_read(struct rsi_common *common)
        return rsi_send_internal_mgmt_frame(common, skb);
 }
 
+/**
+ * This function sends a frame to block/unblock
+ * data queues in the firmware
+ *
+ * @param common Pointer to the driver private structure.
+ * @param block event - block if true, unblock if false
+ * @return 0 on success, -1 on failure.
+ */
+int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
+{
+       struct rsi_mac_frame *mgmt_frame;
+       struct sk_buff *skb;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__);
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       mgmt_frame->desc_word[1] = cpu_to_le16(BLOCK_HW_QUEUE);
+
+       if (block_event == true) {
+               rsi_dbg(INFO_ZONE, "blocking the data qs\n");
+               mgmt_frame->desc_word[4] = cpu_to_le16(0xf);
+       } else {
+               rsi_dbg(INFO_ZONE, "unblocking the data qs\n");
+               mgmt_frame->desc_word[5] = cpu_to_le16(0xf);
+       }
+
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+
+}
+
+
 /**
  * rsi_handle_ta_confirm_type() - This function handles the confirm frames.
  * @common: Pointer to the driver private structure.
@@ -1164,7 +1266,7 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
                                common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
                        }
                } else {
-                       rsi_dbg(ERR_ZONE,
+                       rsi_dbg(INFO_ZONE,
                                "%s: Received bootup params cfm in %d state\n",
                                 __func__, common->fsm_state);
                        return 0;
@@ -1227,7 +1329,7 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
                                        __func__);
                        }
                } else {
-                       rsi_dbg(ERR_ZONE,
+                       rsi_dbg(INFO_ZONE,
                                "%s: Received radio caps cfm in %d state\n",
                                 __func__, common->fsm_state);
                        return 0;
@@ -1245,7 +1347,10 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
                                return rsi_mac80211_attach(common);
                        }
                } else {
-                       goto out;
+                       rsi_dbg(INFO_ZONE,
+                               "%s: Received bbb_rf cfm in %d state\n",
+                                __func__, common->fsm_state);
+                       return 0;
                }
                break;
 
index 8e48e72bae204ae0410d40743fe6e00fe918fe1f..702593f199971a89c227b3015b243efb203920c4 100644 (file)
@@ -81,6 +81,16 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
                /* Send fixed rate */
                frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
                frame_desc[4] = cpu_to_le16(common->min_rate);
+
+               if (conf_is_ht40(&common->priv->hw->conf))
+                       frame_desc[5] = cpu_to_le16(FULL40M_ENABLE);
+
+               if (common->vif_info[0].sgi) {
+                       if (common->min_rate & 0x100) /* Only MCS rates */
+                               frame_desc[4] |=
+                                       cpu_to_le16(ENABLE_SHORTGI_RATE);
+               }
+
        }
 
        frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
@@ -116,6 +126,8 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
        struct ieee80211_hdr *wh = NULL;
        struct ieee80211_tx_info *info;
        struct ieee80211_bss_conf *bss = NULL;
+       struct ieee80211_hw *hw = adapter->hw;
+       struct ieee80211_conf *conf = &hw->conf;
        struct skb_info *tx_params;
        int status = -E2BIG;
        __le16 *msg = NULL;
@@ -175,6 +187,11 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
        else
                msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
 
+       if (conf_is_ht40(conf)) {
+               msg[4] = cpu_to_le16(0xB | RSI_11G_MODE);
+               msg[5] = cpu_to_le16(0x6);
+       }
+
        /* Indicate to firmware to give cfm */
        if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
                msg[1] |= cpu_to_le16(BIT(10));
index 46e7af446f01028ad15eec09e461037bc9f9211c..8428858204a6763a6d4ccfbeb6c2c81a12e7a33f 100644 (file)
@@ -820,9 +820,11 @@ static struct sdio_driver rsi_driver = {
  */
 static int rsi_module_init(void)
 {
-       sdio_register_driver(&rsi_driver);
+       int ret;
+
+       ret = sdio_register_driver(&rsi_driver);
        rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
-       return 0;
+       return ret;
 }
 
 /**
index 20d11ccfffe3b757c06a232ed3c057228339a76b..4834a9abc17177a5cd769418475a433765ed075a 100644 (file)
@@ -401,14 +401,16 @@ void rsi_interrupt_handler(struct rsi_hw *adapter)
                        case BUFFER_AVAILABLE:
                                dev->rx_info.watch_bufferfull_count = 0;
                                dev->rx_info.buffer_full = false;
+                               dev->rx_info.semi_buffer_full = false;
                                dev->rx_info.mgmt_buffer_full = false;
                                rsi_sdio_ack_intr(common->priv,
                                                  (1 << PKT_BUFF_AVAILABLE));
-                               rsi_set_event((&common->tx_thread.event));
+                               rsi_set_event(&common->tx_thread.event);
+
                                rsi_dbg(ISR_ZONE,
-                                       "%s: ==> BUFFER_AVILABLE <==\n",
+                                       "%s: ==> BUFFER_AVAILABLE <==\n",
                                        __func__);
-                               dev->rx_info.buf_avilable_counter++;
+                               dev->rx_info.buf_available_counter++;
                                break;
 
                        case FIRMWARE_ASSERT_IND:
index 4c46e5631e2f0bf81196dd8b89e3a1dea8b00ec7..ef5d394f185bfb46cc24c9170eecf4f6cee4dab8 100644 (file)
@@ -25,7 +25,7 @@
  * @len: Length to be written.
  * @endpoint: Type of endpoint.
  *
- * Return: status: 0 on success, -1 on failure.
+ * Return: status: 0 on success, a negative error code on failure.
  */
 static int rsi_usb_card_write(struct rsi_hw *adapter,
                              void *buf,
@@ -60,7 +60,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter,
  * @data: Pointer to the data that has to be written.
  * @count: Number of multiple bytes to be written.
  *
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, a negative error code on failure.
  */
 static int rsi_write_multiple(struct rsi_hw *adapter,
                              u8 endpoint,
@@ -147,7 +147,7 @@ static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
  * @value: Value to be read.
  * @len: length of data to be read.
  *
- * Return: status: 0 on success, -1 on failure.
+ * Return: status: 0 on success, a negative error code on failure.
  */
 static int rsi_usb_reg_read(struct usb_device *usbdev,
                            u32 reg,
@@ -189,7 +189,7 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
  * @value: Value to write.
  * @len: Length of data to be written.
  *
- * Return: status: 0 on success, -1 on failure.
+ * Return: status: 0 on success, a negative error code on failure.
  */
 static int rsi_usb_reg_write(struct usb_device *usbdev,
                             u32 reg,
@@ -249,7 +249,7 @@ static void rsi_rx_done_handler(struct urb *urb)
  * rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
  * @adapter: Pointer to the adapter structure.
  *
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, a negative error code on failure.
  */
 static int rsi_rx_urb_submit(struct rsi_hw *adapter)
 {
@@ -281,7 +281,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter)
  * @data: Pointer to the data that has to be written.
  * @count: Number of multiple bytes to be written on to the registers.
  *
- * Return: status: 0 on success, -1 on failure.
+ * Return: status: 0 on success, a negative error code on failure.
  */
 int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
                                    u32 addr,
@@ -331,7 +331,7 @@ int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
  * @pkt: Pointer to the data to be written on to the card.
  * @len: Length of the data to be written on to the card.
  *
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, a negative error code on failure.
  */
 static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
                                       u8 *pkt,
@@ -359,6 +359,7 @@ static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
        struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
 
        rsi_kill_thread(&dev->rx_thread);
+       usb_free_urb(dev->rx_usb_urb[0]);
        kfree(adapter->priv->rx_data_pkt);
        kfree(dev->tx_buffer);
 }
@@ -368,7 +369,7 @@ static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
  * @adapter: Pointer to the adapter structure.
  * @pfunction: Pointer to USB interface structure.
  *
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, a negative error code on failure.
  */
 static int rsi_init_usb_interface(struct rsi_hw *adapter,
                                  struct usb_interface *pfunction)
@@ -397,8 +398,16 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter,
                return -ENOMEM;
        }
 
-       rsi_dev->tx_buffer = kmalloc(2048, GFP_ATOMIC);
+       rsi_dev->tx_buffer = kmalloc(2048, GFP_KERNEL);
+       if (!rsi_dev->tx_buffer) {
+               status = -ENOMEM;
+               goto fail_tx;
+       }
        rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
+       if (!rsi_dev->rx_usb_urb[0]) {
+               status = -ENOMEM;
+               goto fail_rx;
+       }
        rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
        rsi_dev->tx_blk_size = 252;
 
@@ -413,7 +422,7 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter,
                                    rsi_usb_rx_thread, "RX-Thread");
        if (status) {
                rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
-               goto fail;
+               goto fail_thread;
        }
 
 #ifdef CONFIG_RSI_DEBUGFS
@@ -424,8 +433,11 @@ static int rsi_init_usb_interface(struct rsi_hw *adapter,
        rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
        return 0;
 
-fail:
+fail_thread:
+       usb_free_urb(rsi_dev->rx_usb_urb[0]);
+fail_rx:
        kfree(rsi_dev->tx_buffer);
+fail_tx:
        kfree(common->rx_data_pkt);
        return status;
 }
@@ -437,7 +449,7 @@ fail:
  * @pfunction: Pointer to the USB interface structure.
  * @id: Pointer to the usb_device_id structure.
  *
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, a negative error code on failure.
  */
 static int rsi_probe(struct usb_interface *pfunction,
                     const struct usb_device_id *id)
@@ -445,6 +457,7 @@ static int rsi_probe(struct usb_interface *pfunction,
        struct rsi_hw *adapter;
        struct rsi_91x_usbdev *dev;
        u16 fw_status;
+       int status;
 
        rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
 
@@ -452,10 +465,11 @@ static int rsi_probe(struct usb_interface *pfunction,
        if (!adapter) {
                rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
                        __func__);
-               return 1;
+               return -ENOMEM;
        }
 
-       if (rsi_init_usb_interface(adapter, pfunction)) {
+       status = rsi_init_usb_interface(adapter, pfunction);
+       if (status) {
                rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
                        __func__);
                goto err;
@@ -465,26 +479,30 @@ static int rsi_probe(struct usb_interface *pfunction,
 
        dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
 
-       if (rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2) < 0)
+       status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2);
+       if (status)
                goto err1;
        else
                fw_status &= 1;
 
        if (!fw_status) {
-               if (rsi_usb_device_init(adapter->priv)) {
+               status = rsi_usb_device_init(adapter->priv);
+               if (status) {
                        rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
                                __func__);
                        goto err1;
                }
 
-               if (rsi_usb_reg_write(dev->usbdev,
-                                     USB_INTERNAL_REG_1,
-                                     RSI_USB_READY_MAGIC_NUM, 1) < 0)
+               status = rsi_usb_reg_write(dev->usbdev,
+                                          USB_INTERNAL_REG_1,
+                                          RSI_USB_READY_MAGIC_NUM, 1);
+               if (status)
                        goto err1;
                rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
        }
 
-       if (rsi_rx_urb_submit(adapter))
+       status = rsi_rx_urb_submit(adapter);
+       if (status)
                goto err1;
 
        return 0;
@@ -493,7 +511,7 @@ err1:
 err:
        rsi_91x_deinit(adapter);
        rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
-       return 1;
+       return status;
 }
 
 /**
@@ -550,33 +568,7 @@ static struct usb_driver rsi_driver = {
 #endif
 };
 
-/**
- * rsi_module_init() - This function registers the client driver.
- * @void: Void.
- *
- * Return: 0 on success.
- */
-static int rsi_module_init(void)
-{
-       usb_register(&rsi_driver);
-       rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
-       return 0;
-}
-
-/**
- * rsi_module_exit() - This function unregisters the client driver.
- * @void: Void.
- *
- * Return: None.
- */
-static void rsi_module_exit(void)
-{
-       usb_deregister(&rsi_driver);
-       rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
-}
-
-module_init(rsi_module_init);
-module_exit(rsi_module_exit);
+module_usb_driver(rsi_driver);
 
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("Common USB layer for RSI drivers");
index 2cb73e7edb98d9e11f0d2e02902517a81b4051aa..5baed945f60e2cb276eeeaa7a520156132af4a55 100644 (file)
@@ -115,6 +115,7 @@ struct wmm_qinfo {
        s32 weight;
        s32 wme_params;
        s32 pkt_contended;
+       s32 txop;
 };
 
 struct transmit_q_stats {
@@ -141,6 +142,12 @@ struct rsi_thread {
        atomic_t thread_done;
 };
 
+struct cqm_info {
+       s8 last_cqm_event_rssi;
+       int rssi_thold;
+       u32 rssi_hyst;
+};
+
 struct rsi_hw;
 
 struct rsi_common {
@@ -192,6 +199,11 @@ struct rsi_common {
        u8 selected_qnum;
        u32 pkt_cnt;
        u8 min_weight;
+
+       /* bgscan related */
+       struct cqm_info cqm_info;
+
+       bool hw_data_qs_blocked;
 };
 
 struct rsi_hw {
index 225215a3b8bb484d76b47ed853afb3aeb6eb2130..3741173fd3acea1132224caaeab02aba47d84b36 100644 (file)
@@ -69,6 +69,7 @@
 
 #define RSI_LMAC_CLOCK_80MHZ            0x1
 #define RSI_ENABLE_40MHZ                (0x1 << 3)
+#define ENABLE_SHORTGI_RATE            BIT(9)
 
 #define RX_BA_INDICATION                1
 #define RSI_TBL_SZ                      40
 #define BW_20MHZ                        0
 #define BW_40MHZ                        1
 
+#define EP_2GHZ_20MHZ                  0
+#define EP_2GHZ_40MHZ                  1
+#define EP_5GHZ_20MHZ                  2
+#define EP_5GHZ_40MHZ                  3
+
+#define SIFS_TX_11N_VALUE              580
+#define SIFS_TX_11B_VALUE              346
+#define SHORT_SLOT_VALUE               360
+#define LONG_SLOT_VALUE                        640
+#define OFDM_ACK_TOUT_VALUE            2720
+#define CCK_ACK_TOUT_VALUE             9440
+#define LONG_PREAMBLE                  0x0000
+#define SHORT_PREAMBLE                 0x0001
+
 #define RSI_SUPP_FILTERS       (FIF_ALLMULTI | FIF_PROBE_REQ |\
                                 FIF_BCN_PRBRESP_PROMISC)
 enum opmode {
@@ -153,7 +168,7 @@ enum cmd_frame_type {
        SCAN_REQUEST,
        TSF_UPDATE,
        PEER_NOTIFY,
-       BLOCK_UNBLOCK,
+       BLOCK_HW_QUEUE,
        SET_KEY_REQ,
        AUTO_RATE_IND,
        BOOTUP_PARAMS_REQUEST,
@@ -238,6 +253,12 @@ struct rsi_radio_caps {
        u8 num_11n_rates;
        u8 num_11ac_rates;
        __le16 gcpd_per_rate[20];
+       __le16 sifs_tx_11n;
+       __le16 sifs_tx_11b;
+       __le16 slot_rx_11n;
+       __le16 ofdm_ack_tout;
+       __le16 cck_ack_tout;
+       __le16 preamble_type;
 } __packed;
 
 static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
@@ -272,6 +293,7 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
 int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
                     u8 key_type, u8 key_id, u32 cipher);
 int rsi_set_channel(struct rsi_common *common, u16 chno);
+int rsi_send_block_unblock_frame(struct rsi_common *common, bool event);
 void rsi_inform_bss_status(struct rsi_common *common, u8 status,
                           const u8 *bssid, u8 qos_enable, u16 aid);
 void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
@@ -283,4 +305,5 @@ void rsi_core_qos_processor(struct rsi_common *common);
 void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
 int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
 int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
+int rsi_band_check(struct rsi_common *common);
 #endif
index df4b5e20e05f056d8e6dad66628c73dd32a2d3f4..c7e8f2be790184399f6aa3f102d054f58852d423 100644 (file)
@@ -30,7 +30,7 @@
 
 enum sdio_interrupt_type {
        BUFFER_FULL         = 0x0,
-       BUFFER_AVAILABLE    = 0x1,
+       BUFFER_AVAILABLE    = 0x2,
        FIRMWARE_ASSERT_IND = 0x3,
        MSDU_PACKET_PENDING = 0x4,
        UNKNOWN_INT         = 0XE
@@ -42,7 +42,7 @@ enum sdio_interrupt_type {
 #define PKT_MGMT_BUFF_FULL                      2
 #define MSDU_PKT_PENDING                        3
 /* Interrupt Bit Related Macros */
-#define PKT_BUFF_AVAILABLE                      0
+#define PKT_BUFF_AVAILABLE                      1
 #define FW_ASSERT_IND                           2
 
 #define RSI_DEVICE_BUFFER_STATUS_REGISTER       0xf3
@@ -84,7 +84,7 @@ enum sdio_interrupt_type {
 #define TA_HOLD_THREAD_VALUE         cpu_to_le32(0xF)
 #define TA_RELEASE_THREAD_VALUE      cpu_to_le32(0xF)
 #define TA_BASE_ADDR                 0x2200
-#define MISC_CFG_BASE_ADDR           0x4150
+#define MISC_CFG_BASE_ADDR           0x4105
 
 struct receive_info {
        bool buffer_full;
@@ -98,7 +98,7 @@ struct receive_info {
        u32 total_sdio_msdu_pending_intr;
        u32 total_sdio_unknown_intr;
        u32 buf_full_counter;
-       u32 buf_avilable_counter;
+       u32 buf_available_counter;
 };
 
 struct rsi_91x_sdiodev {
index c17fcf272728cb06ae25e95787003f6f59f52dba..893c9d5f3d6f09c659710a855fd8425a5475d28a 100644 (file)
@@ -947,6 +947,40 @@ static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev,
        return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index));
 }
 
+static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue = rt2x00dev->bcn;
+       struct queue_entry *entry;
+       int i, bcn_num = 0;
+       u64 off, reg = 0;
+       u32 bssid_dw1;
+
+       /*
+        * Setup offsets of all active beacons in BCN_OFFSET{0,1} registers.
+        */
+       for (i = 0; i < queue->limit; i++) {
+               entry = &queue->entries[i];
+               if (!test_bit(ENTRY_BCN_ENABLED, &entry->flags))
+                       continue;
+               off = rt2800_get_beacon_offset(rt2x00dev, entry->entry_idx);
+               reg |= off << (8 * bcn_num);
+               bcn_num++;
+       }
+
+       WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing);
+
+       rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg);
+       rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32));
+
+       /*
+        * H/W sends up to MAC_BSSID_DW1_BSS_BCN_NUM + 1 consecutive beacons.
+        */
+       rt2800_register_read(rt2x00dev, MAC_BSSID_DW1, &bssid_dw1);
+       rt2x00_set_field32(&bssid_dw1, MAC_BSSID_DW1_BSS_BCN_NUM,
+                          bcn_num > 0 ? bcn_num - 1 : 0);
+       rt2800_register_write(rt2x00dev, MAC_BSSID_DW1, bssid_dw1);
+}
+
 void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -1003,6 +1037,12 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
 
        rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
                                   entry->skb->len + padding_len);
+       __set_bit(ENTRY_BCN_ENABLED, &entry->flags);
+
+       /*
+        * Change global beacons settings.
+        */
+       rt2800_update_beacons_setup(rt2x00dev);
 
        /*
         * Restore beaconing state.
@@ -1053,7 +1093,12 @@ void rt2800_clear_beacon(struct queue_entry *entry)
         * Clear beacon.
         */
        rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
+       __clear_bit(ENTRY_BCN_ENABLED, &entry->flags);
 
+       /*
+        * Change global beacons settings.
+        */
+       rt2800_update_beacons_setup(rt2x00dev);
        /*
         * Restore beaconing state.
         */
@@ -1556,7 +1601,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
                if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
                        reg = le32_to_cpu(conf->bssid[1]);
                        rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
-                       rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
+                       rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
                        conf->bssid[1] = cpu_to_le32(reg);
                }
 
@@ -4517,28 +4562,6 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        if (ret)
                return ret;
 
-       rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
-       rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0,
-                          rt2800_get_beacon_offset(rt2x00dev, 0));
-       rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1,
-                          rt2800_get_beacon_offset(rt2x00dev, 1));
-       rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2,
-                          rt2800_get_beacon_offset(rt2x00dev, 2));
-       rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3,
-                          rt2800_get_beacon_offset(rt2x00dev, 3));
-       rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
-
-       rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
-       rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4,
-                          rt2800_get_beacon_offset(rt2x00dev, 4));
-       rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5,
-                          rt2800_get_beacon_offset(rt2x00dev, 5));
-       rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6,
-                          rt2800_get_beacon_offset(rt2x00dev, 6));
-       rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7,
-                          rt2800_get_beacon_offset(rt2x00dev, 7));
-       rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
-
        rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
        rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
 
index 4fa43a2eeb732bc1e5c5fe8308ae3d570bbc7dcb..9967a1d9f0eceee3a68cfc89534a6df1f39b1763 100644 (file)
@@ -141,8 +141,11 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
                return;
 
-       if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
+       if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) {
+               mutex_lock(&intf->beacon_skb_mutex);
                rt2x00queue_update_beacon(rt2x00dev, vif);
+               mutex_unlock(&intf->beacon_skb_mutex);
+       }
 }
 
 static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -216,7 +219,7 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
         * never be called for USB devices.
         */
        WARN_ON(rt2x00_is_usb(rt2x00dev));
-       rt2x00queue_update_beacon_locked(rt2x00dev, vif);
+       rt2x00queue_update_beacon(rt2x00dev, vif);
 }
 
 void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -1470,8 +1473,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
        /*
         * Free the driver data.
         */
-       if (rt2x00dev->drv_data)
-               kfree(rt2x00dev->drv_data);
+       kfree(rt2x00dev->drv_data);
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
 
index 004dff9b962d9753a0a7b43a52983acb0032f748..ad6e5a8d1e10fbc06dac3895288168f1d7d2fb65 100644 (file)
@@ -626,25 +626,24 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
         * Start/stop beaconing.
         */
        if (changes & BSS_CHANGED_BEACON_ENABLED) {
+               mutex_lock(&intf->beacon_skb_mutex);
                if (!bss_conf->enable_beacon && intf->enable_beacon) {
                        rt2x00dev->intf_beaconing--;
                        intf->enable_beacon = false;
-                       /*
-                        * Clear beacon in the H/W for this vif. This is needed
-                        * to disable beaconing on this particular interface
-                        * and keep it running on other interfaces.
-                        */
-                       rt2x00queue_clear_beacon(rt2x00dev, vif);
 
                        if (rt2x00dev->intf_beaconing == 0) {
                                /*
                                 * Last beaconing interface disabled
                                 * -> stop beacon queue.
                                 */
-                               mutex_lock(&intf->beacon_skb_mutex);
                                rt2x00queue_stop_queue(rt2x00dev->bcn);
-                               mutex_unlock(&intf->beacon_skb_mutex);
                        }
+                       /*
+                        * Clear beacon in the H/W for this vif. This is needed
+                        * to disable beaconing on this particular interface
+                        * and keep it running on other interfaces.
+                        */
+                       rt2x00queue_clear_beacon(rt2x00dev, vif);
                } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
                        rt2x00dev->intf_beaconing++;
                        intf->enable_beacon = true;
@@ -660,11 +659,10 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
                                 * First beaconing interface enabled
                                 * -> start beacon queue.
                                 */
-                               mutex_lock(&intf->beacon_skb_mutex);
                                rt2x00queue_start_queue(rt2x00dev->bcn);
-                               mutex_unlock(&intf->beacon_skb_mutex);
                        }
                }
+               mutex_unlock(&intf->beacon_skb_mutex);
        }
 
        /*
@@ -801,6 +799,8 @@ int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
 
        setup.tx = tx_ant;
        setup.rx = rx_ant;
+       setup.rx_chain_num = 0;
+       setup.tx_chain_num = 0;
 
        rt2x00lib_config_antenna(rt2x00dev, setup);
 
index 6f236ea180aa3df23dbf8be831f367c020f4a1a2..f0178fd4fe5ff8c078749e6df8b82d1ea7a26a0c 100644 (file)
@@ -119,14 +119,12 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
        /*
         * Allocate DMA memory for descriptor and buffer.
         */
-       addr = dma_alloc_coherent(rt2x00dev->dev,
-                                 queue->limit * queue->desc_size,
-                                 &dma, GFP_KERNEL);
+       addr = dma_zalloc_coherent(rt2x00dev->dev,
+                                  queue->limit * queue->desc_size, &dma,
+                                  GFP_KERNEL);
        if (!addr)
                return -ENOMEM;
 
-       memset(addr, 0, queue->limit * queue->desc_size);
-
        /*
         * Initialize all queue entries to contain valid addresses.
         */
index 5642ccceca7c5544ba6e2aad62b0f00b0870a2c1..8e68f87ab13c3081f062acc69fb71f59601e836f 100644 (file)
@@ -754,8 +754,6 @@ int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
        if (unlikely(!intf->beacon))
                return -ENOBUFS;
 
-       mutex_lock(&intf->beacon_skb_mutex);
-
        /*
         * Clean up the beacon skb.
         */
@@ -768,13 +766,11 @@ int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
        if (rt2x00dev->ops->lib->clear_beacon)
                rt2x00dev->ops->lib->clear_beacon(intf->beacon);
 
-       mutex_unlock(&intf->beacon_skb_mutex);
-
        return 0;
 }
 
-int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
-                                    struct ieee80211_vif *vif)
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+                             struct ieee80211_vif *vif)
 {
        struct rt2x00_intf *intf = vif_to_intf(vif);
        struct skb_frame_desc *skbdesc;
@@ -815,19 +811,6 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
 
 }
 
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-                             struct ieee80211_vif *vif)
-{
-       struct rt2x00_intf *intf = vif_to_intf(vif);
-       int ret;
-
-       mutex_lock(&intf->beacon_skb_mutex);
-       ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
-       mutex_unlock(&intf->beacon_skb_mutex);
-
-       return ret;
-}
-
 bool rt2x00queue_for_each_entry(struct data_queue *queue,
                                enum queue_index start,
                                enum queue_index end,
index c48125be0e34247cfe30ba51be437bb03ef66ed3..2233b911a1d7d7c5992db5681dcc1907d9719769 100644 (file)
@@ -353,6 +353,7 @@ struct txentry_desc {
  */
 enum queue_entry_flags {
        ENTRY_BCN_ASSIGNED,
+       ENTRY_BCN_ENABLED,
        ENTRY_OWNER_DEVICE_DATA,
        ENTRY_DATA_PENDING,
        ENTRY_DATA_IO_FAILED,
index 2c1c02bafa10bbfe1f198279400a84ad08256bb4..4b904f70818487fde779ffd0c524284f23972e84 100644 (file)
@@ -16,6 +16,7 @@
  *
  * based also on:
  *  - portions of rtl8187se Linux staging driver, Copyright Realtek corp.
+ *    (available in drivers/staging/rtl8187se directory of Linux 3.14)
  *  - other GPL, unpublished (until now), Linux driver code,
  *    Copyright Larry Finger <Larry.Finger@lwfinger.net>
  *
@@ -209,7 +210,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
        struct rtl8180_priv *priv = dev->priv;
        struct rtl818x_rx_cmd_desc *cmd_desc;
        unsigned int count = 32;
-       u8 signal, agc, sq;
+       u8 agc, sq, signal = 1;
        dma_addr_t mapping;
 
        while (count--) {
@@ -222,12 +223,20 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        struct rtl8187se_rx_desc *desc = entry;
 
                        flags = le32_to_cpu(desc->flags);
+                       /* if ownership flag is set, then we can trust the
+                        * HW has written other fields. We must not trust
+                        * other descriptor data read before we checked (read)
+                        * the ownership flag
+                        */
+                       rmb();
                        flags2 = le32_to_cpu(desc->flags2);
                        tsft = le64_to_cpu(desc->tsft);
                } else {
                        struct rtl8180_rx_desc *desc = entry;
 
                        flags = le32_to_cpu(desc->flags);
+                       /* same as above */
+                       rmb();
                        flags2 = le32_to_cpu(desc->flags2);
                        tsft = le64_to_cpu(desc->tsft);
                }
@@ -266,18 +275,21 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        rx_status.rate_idx = (flags >> 20) & 0xF;
                        agc = (flags2 >> 17) & 0x7F;
 
-                       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
+                       switch (priv->chip_family) {
+                       case RTL818X_CHIP_FAMILY_RTL8185:
                                if (rx_status.rate_idx > 3)
-                                       signal = 90 - clamp_t(u8, agc, 25, 90);
+                                       signal = -clamp_t(u8, agc, 25, 90) - 9;
                                else
-                                       signal = 95 - clamp_t(u8, agc, 30, 95);
-                       } else if (priv->chip_family ==
-                                  RTL818X_CHIP_FAMILY_RTL8180) {
+                                       signal = -clamp_t(u8, agc, 30, 95);
+                               break;
+                       case RTL818X_CHIP_FAMILY_RTL8180:
                                sq = flags2 & 0xff;
                                signal = priv->rf->calc_rssi(agc, sq);
-                       } else {
+                               break;
+                       case RTL818X_CHIP_FAMILY_RTL8187SE:
                                /* TODO: rtl8187se rssi */
                                signal = 10;
+                               break;
                        }
                        rx_status.signal = signal;
                        rx_status.freq = dev->conf.chandef.chan->center_freq;
@@ -336,7 +348,6 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
                        info->flags |= IEEE80211_TX_STAT_ACK;
 
                info->status.rates[0].count = (flags & 0xFF) + 1;
-               info->status.rates[1].idx = -1;
 
                ieee80211_tx_status_irqsafe(dev, skb);
                if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -528,9 +539,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
        entry->plcp_len = cpu_to_le16(plcp_len);
        entry->tx_buf = cpu_to_le32(mapping);
 
-       entry->flags2 = info->control.rates[1].idx >= 0 ?
-               ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
-       entry->retry_limit = info->control.rates[0].count;
+       entry->retry_limit = info->control.rates[0].count - 1;
 
        /* We must be sure that tx_flags is written last because the HW
         * looks at it to check if the rest of data is valid or not
@@ -852,7 +861,7 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
 
        if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
                rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
-               rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
+               rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0);
        } else {
                rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
 
@@ -868,6 +877,16 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
                reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
                rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
                rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+               /* fix eccessive IFS after CTS-to-self */
+               if (priv->map_pio) {
+                       u8 reg;
+
+                       reg = rtl818x_ioread8(priv, &priv->map->PGSELECT);
+                       rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1);
+                       rtl818x_iowrite8(priv, REG_ADDR1(0xff), 0x35);
+                       rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg);
+               } else
+                       rtl818x_iowrite8(priv, REG_ADDR1(0x1ff), 0x35);
        }
 
        if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
@@ -1450,9 +1469,10 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
        vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
 
        if (changed & BSS_CHANGED_BSSID) {
-               for (i = 0; i < ETH_ALEN; i++)
-                       rtl818x_iowrite8(priv, &priv->map->BSSID[i],
-                                        info->bssid[i]);
+               rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->BSSID[0],
+                                 le16_to_cpu(*(__le16 *)info->bssid));
+               rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->BSSID[2],
+                                 le32_to_cpu(*(__le32 *)(info->bssid + 2)));
 
                if (is_valid_ether_addr(info->bssid)) {
                        if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -1723,17 +1743,20 @@ static int rtl8180_probe(struct pci_dev *pdev,
        priv = dev->priv;
        priv->pdev = pdev;
 
-       dev->max_rates = 2;
+       dev->max_rates = 1;
        SET_IEEE80211_DEV(dev, &pdev->dev);
        pci_set_drvdata(pdev, dev);
 
+       priv->map_pio = false;
        priv->map = pci_iomap(pdev, 1, mem_len);
-       if (!priv->map)
+       if (!priv->map) {
                priv->map = pci_iomap(pdev, 0, io_len);
+               priv->map_pio = true;
+       }
 
        if (!priv->map) {
-               printk(KERN_ERR "%s (rtl8180): Cannot map device memory\n",
-                      pci_name(pdev));
+               dev_err(&pdev->dev, "Cannot map device memory/PIO\n");
+               err = -ENOMEM;
                goto err_free_dev;
        }
 
@@ -1751,8 +1774,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
        dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
 
        dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-                    IEEE80211_HW_RX_INCLUDES_FCS |
-                    IEEE80211_HW_SIGNAL_UNSPEC;
+               IEEE80211_HW_RX_INCLUDES_FCS;
        dev->vif_data_size = sizeof(struct rtl8180_vif);
        dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                        BIT(NL80211_IFTYPE_ADHOC);
@@ -1783,12 +1805,19 @@ static int rtl8180_probe(struct pci_dev *pdev,
 
        case RTL818X_TX_CONF_RTL8187SE:
                chip_name = "RTL8187SE";
+               if (priv->map_pio) {
+                       dev_err(&pdev->dev,
+                               "MMIO failed. PIO not supported on RTL8187SE\n");
+                       err = -ENOMEM;
+                       goto err_iounmap;
+               }
                priv->chip_family = RTL818X_CHIP_FAMILY_RTL8187SE;
                break;
 
        default:
                printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
                       pci_name(pdev), reg >> 25);
+               err = -ENODEV;
                goto err_iounmap;
        }
 
@@ -1809,6 +1838,11 @@ static int rtl8180_probe(struct pci_dev *pdev,
                pci_try_set_mwi(pdev);
        }
 
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185)
+               dev->flags |= IEEE80211_HW_SIGNAL_DBM;
+       else
+               dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
+
        rtl8180_eeprom_read(priv);
 
        switch (priv->rf_type) {
@@ -1834,12 +1868,14 @@ static int rtl8180_probe(struct pci_dev *pdev,
        default:
                printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
                       pci_name(pdev), priv->rf_type);
+               err = -ENODEV;
                goto err_iounmap;
        }
 
        if (!priv->rf) {
                printk(KERN_ERR "%s (rtl8180): %s RF frontend not supported!\n",
                       pci_name(pdev), rf_name);
+               err = -ENODEV;
                goto err_iounmap;
        }
 
index 291a55970d1ab0ebdaabcc6463a4a458afab307d..e8243a44d6b6074136921154d498bbf63facc0c9 100644 (file)
@@ -107,6 +107,7 @@ struct rtl8180_priv {
        struct ieee80211_vif *vif;
 
        /* rtl8180 driver specific */
+       bool map_pio;
        spinlock_t lock;
        void *rx_ring;
        u8 rx_ring_sz;
index 871fc3c6d559f63b65f1322417a1e2d61a8fc955..049f4c8d98a8675baa69bd993a1bfe268bd08f2f 100644 (file)
@@ -114,7 +114,7 @@ extern u32 btc_dbg_type[];
 
 
 #define        CL_SPRINTF      snprintf
-#define        CL_PRINTF       printk
+#define        CL_PRINTF(buf)  printk("%s", buf)
 
 #define        BTC_PRINT(dbgtype, dbgflag, printstr, ...)              \
        do {                                                    \
index b1ed6d0796f67e187fb928423edda6977c91f863..56e218e0469cd418c29acb072ef8665b91cb5092 100644 (file)
@@ -1064,7 +1064,6 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
                         "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
                return rtl_tx_agg_start(hw, sta, tid, ssn);
-               break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
index b14cf5a10f4421127e8f6ce414eee52054129163..d840ad7bdf65e82248a97190e43fce3c97d4abaf 100644 (file)
@@ -1231,7 +1231,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
 
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index a9cfa13be3a8886765c00a0179b2f47e9137273e..0f9314205526b469727eeba332687e2b3e5a477d 100644 (file)
@@ -125,7 +125,6 @@ bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                                         "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
                                return true;
-                               break;
                        default:
                                RT_ASSERT(false,
                                          "rtl88_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
index 7af85cfa8f8706f0c67c0bf2082d11889fa662d9..cd7e7a52713380966bc5128548ab8bf6c266233a 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define        MSR_MASK                                0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
index cdecb0fd4d8edb531c34cb929b8a13f568f40eed..df98a5e4729acef3065040d1f7c719eb829a3a83 100644 (file)
@@ -1200,13 +1200,12 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Network type %d not supported!\n", type);
                return 1;
-               break;
 
        }
 
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index ed703a1b3b7c1c54490efb803e804000a2d43342..dc8460c0b32f44f60b8644cb04409c7d4235926a 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define        MSR_MASK                                0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
index a903c2671b4d1701c0c71416748b5d4fa8ec6bf9..270cbffcac70cf02329fc1093f82e00244b78cae 100644 (file)
@@ -1360,7 +1360,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
        }
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index 2b08671004a0aa88b4c6ea270ebc4e7c8368aee6..280c3da42993dbd6154ba8c5ab9a6abbb5784d2d 100644 (file)
@@ -1128,7 +1128,7 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
        }
        rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index 3d1f0dd4e52d89825710544078895752895b2c76..592125a5f19cbd3b43501d26345abe68c3b11fc8 100644 (file)
@@ -203,11 +203,12 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        u32 returnvalue, originalvalue, bitshift;
-       u8 dbi_direct;
 
        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
                 regaddr, bitmask);
        if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) {
+               u8 dbi_direct = 0;
+
                /* mac1 use phy0 read radio_b. */
                /* mac0 use phy1 read radio_b. */
                if (rtlhal->during_mac1init_radioa)
index 7f29b8d765b37cf22e1a8cf362144d4229a2f583..315a298bab06a756525ef81ea14f66105e426f03 100644 (file)
 #define        MSR_ADHOC                       0x01
 #define        MSR_INFRA                       0x02
 #define        MSR_AP                          0x03
+#define        MSR_MASK                        0x03
 
 /* 6. Adaptive Control Registers  (Offset: 0x0160 - 0x01CF) */
 /* ----------------------------------------------------- */
index 380e7d4b1ccf8a3629ab142354b88c35535629f1..331b1584a1a2d27aae6871b0be096f58c45e318c 100644 (file)
@@ -112,13 +112,10 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
        switch (rtlphy->rf_type) {
        case RF_1T1R:
                return 0x11;
-               break;
        case RF_1T2R:
                return 0x12;
-               break;
        case RF_2T2R:
                return 0x22;
-               break;
        default:
                RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
                         rtlphy->rf_type);
@@ -438,7 +435,6 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                                 "Unexpected Download step!!\n");
                        goto fail;
-                       break;
                }
 
                /* <2> Download image file */
index 1c7101bcd79034c486be9713d99c19c335478cb6..00e067044c08d0a9cafa9009466eb22c21965d56 100644 (file)
@@ -1198,7 +1198,6 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Network type %d not supported!\n", type);
                return 1;
-               break;
 
        }
 
index 87f69166a7eda86b2517feb6237da073c0ebe9da..662a079f76f3bdc01b07e6f7d41dbed0fe3ff473 100644 (file)
@@ -1103,13 +1103,12 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
                         "Network type %d not supported!\n",
                         type);
                return 1;
-               break;
 
        }
 
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0x03) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index 64376b38708bd2e216f4b23a18da87247b4059b9..ce2c66fd9eeeaec1f943130a8190805d848d631b 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define        MSR_MASK                                0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
index 3d555495b45319b8d287d9edd5e1bc1c6162e625..3cd286930fe0088d73b2046a970f3349a4531a3b 100644 (file)
@@ -1197,7 +1197,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
        }
        rtl_write_byte(rtlpriv, (MSR), bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0x03) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index e4a507a756fb62ffcc15d92a98485e52f797ac92..4573310c707fb3b82a3fda0c1207d35f3224737c 100644 (file)
@@ -124,7 +124,6 @@ bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                         "rtlbe_hal_pwrseqcmdparsing(): "
                                         "PWR_CMD_END\n");
                                return true;
-                               break;
                        default:
                                RT_ASSERT(false,
                                          "rtlbe_hal_pwrseqcmdparsing(): "
index 4c653fab8795fd90de2381e4c48d4716be03c95d..3006849ed439bde2b40e8858c847761e67433ed0 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define        MSR_MASK                                0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
index 4e782f18ae3431600a66923216faa536d42c46b6..38234851457e51cd22829a1ea25c7e26cc584c21 100644 (file)
@@ -991,8 +991,9 @@ out:
 
 static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
-                            struct cfg80211_scan_request *req)
+                            struct ieee80211_scan_request *hw_req)
 {
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct wl1251 *wl = hw->priv;
        struct sk_buff *skb;
        size_t ssid_len = 0;
index 7541bd1a4a4b40de9b6be249dea94ca8f95e96ae..0c0d5cd98514207c25b3faf804573f13af78976e 100644 (file)
@@ -156,7 +156,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                         cmd->params.role_id, band,
                                         wl->scan.ssid, wl->scan.ssid_len,
                                         wl->scan.req->ie,
-                                        wl->scan.req->ie_len, false);
+                                        wl->scan.req->ie_len, NULL, 0, false);
        if (ret < 0) {
                wl1271_error("PROBE request template failed");
                goto out;
@@ -317,7 +317,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                                  struct wl12xx_vif *wlvif,
                                  struct cfg80211_sched_scan_request *req,
-                                 struct ieee80211_sched_scan_ies *ies)
+                                 struct ieee80211_scan_ies *ies)
 {
        struct wl1271_cmd_sched_scan_config *cfg = NULL;
        struct wlcore_scan_channels *cfg_channels = NULL;
@@ -378,8 +378,11 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                                                 wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
-                                                ies->ie[band],
-                                                ies->len[band], true);
+                                                ies->ies[band],
+                                                ies->len[band],
+                                                ies->common_ies,
+                                                ies->common_ie_len,
+                                                true);
                if (ret < 0) {
                        wl1271_error("2.4GHz PROBE request template failed");
                        goto out;
@@ -392,8 +395,11 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                                                 wlvif->role_id, band,
                                                 req->ssids[0].ssid,
                                                 req->ssids[0].ssid_len,
-                                                ies->ie[band],
-                                                ies->len[band], true);
+                                                ies->ies[band],
+                                                ies->len[band],
+                                                ies->common_ies,
+                                                ies->common_ie_len,
+                                                true);
                if (ret < 0) {
                        wl1271_error("5GHz PROBE request template failed");
                        goto out;
@@ -449,7 +455,7 @@ out_free:
 
 int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif  *wlvif,
                            struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_sched_scan_ies *ies)
+                           struct ieee80211_scan_ies *ies)
 {
        int ret;
 
index 264af7ac27854721c372379c4b59cd2248facac6..427f9af85a00d5f6b8d2676d102f12734ad2db13 100644 (file)
@@ -135,6 +135,6 @@ int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif  *wlvif,
                            struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_sched_scan_ies *ies);
+                           struct ieee80211_scan_ies *ies);
 void wl12xx_scan_sched_scan_stop(struct wl1271 *wl,  struct wl12xx_vif *wlvif);
 #endif
index 2b642f8c9266ef21321f8276e6ccbd9a719170af..98666f235a12d9a70873e18eaa3245298007284f 100644 (file)
@@ -113,6 +113,8 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                 req->ssids ? req->ssids[0].ssid_len : 0,
                                 req->ie,
                                 req->ie_len,
+                                NULL,
+                                0,
                                 false);
                if (ret < 0) {
                        wl1271_error("2.4GHz PROBE request template failed");
@@ -128,6 +130,8 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                 req->ssids ? req->ssids[0].ssid_len : 0,
                                 req->ie,
                                 req->ie_len,
+                                NULL,
+                                0,
                                 false);
                if (ret < 0) {
                        wl1271_error("5GHz PROBE request template failed");
@@ -161,7 +165,7 @@ static
 int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
                                  struct wl12xx_vif *wlvif,
                                  struct cfg80211_sched_scan_request *req,
-                                 struct ieee80211_sched_scan_ies *ies)
+                                 struct ieee80211_scan_ies *ies)
 {
        struct wl18xx_cmd_scan_params *cmd;
        struct wlcore_scan_channels *cmd_channels = NULL;
@@ -237,8 +241,10 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
                                 req->ssids ? req->ssids[0].ssid_len : 0,
-                                ies->ie[band],
+                                ies->ies[band],
                                 ies->len[band],
+                                ies->common_ies,
+                                ies->common_ie_len,
                                 true);
                if (ret < 0) {
                        wl1271_error("2.4GHz PROBE request template failed");
@@ -252,8 +258,10 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
                                 cmd->role_id, band,
                                 req->ssids ? req->ssids[0].ssid : NULL,
                                 req->ssids ? req->ssids[0].ssid_len : 0,
-                                ies->ie[band],
+                                ies->ies[band],
                                 ies->len[band],
+                                ies->common_ies,
+                                ies->common_ie_len,
                                 true);
                if (ret < 0) {
                        wl1271_error("5GHz PROBE request template failed");
@@ -277,7 +285,7 @@ out:
 
 int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                            struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_sched_scan_ies *ies)
+                           struct ieee80211_scan_ies *ies)
 {
        return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
 }
index eadee42689d1802ba6eba714ca798703f2abe843..2e636aa5dba9bac8d11ff63d5fe02b7efc19af9c 100644 (file)
@@ -122,6 +122,6 @@ int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                            struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_sched_scan_ies *ies);
+                           struct ieee80211_scan_ies *ies);
 void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 #endif
index 40dc30f4faaab2b2be20c724abe54c7807f0871a..e269c0a57017a270b0c724abf5426f105fa7343a 100644 (file)
@@ -1124,7 +1124,8 @@ out:
 int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               u8 role_id, u8 band,
                               const u8 *ssid, size_t ssid_len,
-                              const u8 *ie, size_t ie_len, bool sched_scan)
+                              const u8 *ie0, size_t ie0_len, const u8 *ie1,
+                              size_t ie1_len, bool sched_scan)
 {
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
        struct sk_buff *skb;
@@ -1136,13 +1137,15 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
 
        skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
-                                    ie_len);
+                                    ie0_len + ie1_len);
        if (!skb) {
                ret = -ENOMEM;
                goto out;
        }
-       if (ie_len)
-               memcpy(skb_put(skb, ie_len), ie, ie_len);
+       if (ie0_len)
+               memcpy(skb_put(skb, ie0_len), ie0, ie0_len);
+       if (ie1_len)
+               memcpy(skb_put(skb, ie1_len), ie1, ie1_len);
 
        if (sched_scan &&
            (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
index b084830a61cf51adbe40b8461fc28510dd13bd5a..6788d7356ca5a8d6d4aab0c6d2404985f4a072e1 100644 (file)
@@ -64,7 +64,8 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               u8 role_id, u8 band,
                               const u8 *ssid, size_t ssid_len,
-                              const u8 *ie, size_t ie_len, bool sched_scan);
+                              const u8 *ie, size_t ie_len, const u8 *common_ie,
+                              size_t common_ie_len, bool sched_scan);
 struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
                                              struct wl12xx_vif *wlvif,
                                              struct sk_buff *skb);
index 3d6028e62750279299431ce56315bdf67fa1f8cc..48f83868f9cbee35a34871ce078f3615e80827e1 100644 (file)
@@ -3540,8 +3540,9 @@ out:
 
 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
-                            struct cfg80211_scan_request *req)
+                            struct ieee80211_scan_request *hw_req)
 {
+       struct cfg80211_scan_request *req = &hw_req->req;
        struct wl1271 *wl = hw->priv;
        int ret;
        u8 *ssid = NULL;
@@ -3636,7 +3637,7 @@ out:
 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif,
                                      struct cfg80211_sched_scan_request *req,
-                                     struct ieee80211_sched_scan_ies *ies)
+                                     struct ieee80211_scan_ies *ies)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
index a6ab24b5c0f96ab86278f71b93aae80a11048433..4dadd0c62cde5251f83d924e20b1c5312e3d2806 100644 (file)
@@ -37,7 +37,7 @@ void wl1271_scan_complete_work(struct work_struct *work);
 int wl1271_scan_sched_scan_config(struct wl1271 *wl,
                                     struct wl12xx_vif *wlvif,
                                     struct cfg80211_sched_scan_request *req,
-                                    struct ieee80211_sched_scan_ies *ies);
+                                    struct ieee80211_scan_ies *ies);
 int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 void wlcore_scan_sched_scan_results(struct wl1271 *wl);
 
index 95a54504f0cc3815831d212906002faf6e0e8904..71320509b56d5bc3133d602888092a6e566cb89e 100644 (file)
@@ -95,7 +95,7 @@ struct wlcore_ops {
        int (*scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
        int (*sched_scan_start)(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                                struct cfg80211_sched_scan_request *req,
-                               struct ieee80211_sched_scan_ies *ies);
+                               struct ieee80211_scan_ies *ies);
        void (*sched_scan_stop)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
        int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
        int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
index 2532ce85d718fc018ccea2fccbe45ecd707602e4..28c98229e95f66c9123fae82a0d3e453fd761b42 100644 (file)
@@ -44,6 +44,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <linux/debugfs.h>
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -224,6 +225,10 @@ struct xenvif {
        struct xenvif_queue *queues;
        unsigned int num_queues; /* active queues, resource allocated */
 
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *xenvif_dbg_root;
+#endif
+
        /* Miscellaneous private stuff. */
        struct net_device *dev;
 };
@@ -297,10 +302,16 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
+irqreturn_t xenvif_interrupt(int irq, void *dev_id);
+
 extern bool separate_tx_rx_irq;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_drain_timeout_jiffies;
 extern unsigned int xenvif_max_queues;
 
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *xen_netback_dbg_root;
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
index 9e97c7ca0ddd1f1f0439a6f20c72ac2c13c1db59..bd59d9dbf27b1e11e049309745bed20b7d6e97a2 100644 (file)
@@ -102,7 +102,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);
@@ -418,8 +418,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
-       dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-                             xenvif_max_queues);
+       dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+                             ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
index c65b636bcab9dfb3bdf3528bd3d0fcd30f7a5812..769e553d3f45dfc56a12caf39133dd9870fcb69c 100644 (file)
@@ -2027,6 +2027,13 @@ static int __init netback_init(void)
 
        rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
 
+#ifdef CONFIG_DEBUG_FS
+       xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               pr_warn("Init of debugfs returned %ld!\n",
+                       PTR_ERR(xen_netback_dbg_root));
+#endif /* CONFIG_DEBUG_FS */
+
        return 0;
 
 failed_init:
@@ -2037,6 +2044,10 @@ module_init(netback_init);
 
 static void __exit netback_fini(void)
 {
+#ifdef CONFIG_DEBUG_FS
+       if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
+               debugfs_remove_recursive(xen_netback_dbg_root);
+#endif /* CONFIG_DEBUG_FS */
        xenvif_xenbus_fini();
 }
 module_exit(netback_fini);
index 3d85acd84bad03c5f3e0b4578e8bb414654b165b..580517d857bf7c753e3a66d6f1da6177f81365c3 100644 (file)
@@ -44,6 +44,175 @@ static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *xen_netback_dbg_root = NULL;
+
+static int xenvif_read_io_ring(struct seq_file *m, void *v)
+{
+       struct xenvif_queue *queue = m->private;
+       struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
+       struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
+
+       if (tx_ring->sring) {
+               struct xen_netif_tx_sring *sring = tx_ring->sring;
+
+               seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
+                          tx_ring->nr_ents);
+               seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+                          sring->req_prod,
+                          sring->req_prod - sring->rsp_prod,
+                          tx_ring->req_cons,
+                          tx_ring->req_cons - sring->rsp_prod,
+                          sring->req_event,
+                          sring->req_event - sring->rsp_prod);
+               seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
+                          sring->rsp_prod,
+                          tx_ring->rsp_prod_pvt,
+                          tx_ring->rsp_prod_pvt - sring->rsp_prod,
+                          sring->rsp_event,
+                          sring->rsp_event - sring->rsp_prod);
+               seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
+                          queue->pending_prod,
+                          queue->pending_cons,
+                          nr_pending_reqs(queue));
+               seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
+                          queue->dealloc_prod,
+                          queue->dealloc_cons,
+                          queue->dealloc_prod - queue->dealloc_cons);
+       }
+
+       if (rx_ring->sring) {
+               struct xen_netif_rx_sring *sring = rx_ring->sring;
+
+               seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
+               seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+                          sring->req_prod,
+                          sring->req_prod - sring->rsp_prod,
+                          rx_ring->req_cons,
+                          rx_ring->req_cons - sring->rsp_prod,
+                          sring->req_event,
+                          sring->req_event - sring->rsp_prod);
+               seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
+                          sring->rsp_prod,
+                          rx_ring->rsp_prod_pvt,
+                          rx_ring->rsp_prod_pvt - sring->rsp_prod,
+                          sring->rsp_event,
+                          sring->rsp_event - sring->rsp_prod);
+       }
+
+       seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
+                  "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
+                  "remaining: %lu, expires: %lu, now: %lu\n",
+                  queue->napi.state, queue->napi.weight,
+                  skb_queue_len(&queue->tx_queue),
+                  timer_pending(&queue->credit_timeout),
+                  queue->credit_bytes,
+                  queue->credit_usec,
+                  queue->remaining_credit,
+                  queue->credit_timeout.expires,
+                  jiffies);
+
+       return 0;
+}
+
+#define XENVIF_KICK_STR "kick"
+
+static ssize_t
+xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
+                    loff_t *ppos)
+{
+       struct xenvif_queue *queue =
+               ((struct seq_file *)filp->private_data)->private;
+       int len;
+       char write[sizeof(XENVIF_KICK_STR)];
+
+       /* don't allow partial writes and check the length */
+       if (*ppos != 0)
+               return 0;
+       if (count < sizeof(XENVIF_KICK_STR) - 1)
+               return -ENOSPC;
+
+       len = simple_write_to_buffer(write,
+                                    sizeof(write),
+                                    ppos,
+                                    buf,
+                                    count);
+       if (len < 0)
+               return len;
+
+       if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
+               xenvif_interrupt(0, (void *)queue);
+       else {
+               pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
+                       queue->id);
+               count = -EINVAL;
+       }
+       return count;
+}
+
+static int xenvif_dump_open(struct inode *inode, struct file *filp)
+{
+       int ret;
+       void *queue = NULL;
+
+       if (inode->i_private)
+               queue = inode->i_private;
+       ret = single_open(filp, xenvif_read_io_ring, queue);
+       filp->f_mode |= FMODE_PWRITE;
+       return ret;
+}
+
+static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
+       .owner = THIS_MODULE,
+       .open = xenvif_dump_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = xenvif_write_io_ring,
+};
+
+static void xenvif_debugfs_addif(struct xenvif_queue *queue)
+{
+       struct dentry *pfile;
+       struct xenvif *vif = queue->vif;
+       int i;
+
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               return;
+
+       vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
+                                                 xen_netback_dbg_root);
+       if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
+               for (i = 0; i < vif->num_queues; ++i) {
+                       char filename[sizeof("io_ring_q") + 4];
+
+                       snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+                       pfile = debugfs_create_file(filename,
+                                                   S_IRUSR | S_IWUSR,
+                                                   vif->xenvif_dbg_root,
+                                                   &vif->queues[i],
+                                                   &xenvif_dbg_io_ring_ops_fops);
+                       if (IS_ERR_OR_NULL(pfile))
+                               pr_warn("Creation of io_ring file returned %ld!\n",
+                                       PTR_ERR(pfile));
+               }
+       } else
+               netdev_warn(vif->dev,
+                           "Creation of vif debugfs dir returned %ld!\n",
+                           PTR_ERR(vif->xenvif_dbg_root));
+}
+
+static void xenvif_debugfs_delif(struct xenvif *vif)
+{
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               return;
+
+       if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
+               debugfs_remove_recursive(vif->xenvif_dbg_root);
+       vif->xenvif_dbg_root = NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
 static int netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -246,8 +415,12 @@ static void backend_create_xenvif(struct backend_info *be)
 
 static void backend_disconnect(struct backend_info *be)
 {
-       if (be->vif)
+       if (be->vif) {
+#ifdef CONFIG_DEBUG_FS
+               xenvif_debugfs_delif(be->vif);
+#endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect(be->vif);
+       }
 }
 
 static void backend_connect(struct backend_info *be)
@@ -560,6 +733,9 @@ static void connect(struct backend_info *be)
                        be->vif->num_queues = queue_index;
                        goto err;
                }
+#ifdef CONFIG_DEBUG_FS
+               xenvif_debugfs_addif(queue);
+#endif /* CONFIG_DEBUG_FS */
        }
 
        /* Initialisation completed, tell core driver the number of
index 419056d7887ec9f516e46699bdc0f46dfe85eeb7..f8a76090cbca1e8bbf694a4fbc16a54ef60490d5 100644 (file)
@@ -86,17 +86,12 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
                        return -EINVAL;
                break;
        case PTP_PF_PHYSYNC:
-               pr_err("sorry, cannot reassign the calibration pin\n");
-               return -EINVAL;
+               if (chan != 0)
+                       return -EINVAL;
        default:
                return -EINVAL;
        }
 
-       if (pin2->func == PTP_PF_PHYSYNC) {
-               pr_err("sorry, cannot reprogram the calibration pin\n");
-               return -EINVAL;
-       }
-
        if (info->verify(info, pin, func, chan)) {
                pr_err("driver cannot use function %u on pin %u\n", func, chan);
                return -EOPNOTSUPP;
index d837c3c5330fab5c2c77548f31996db3dacad763..fbc6701bef30811c7359861fcf6b81da39f6476d 100644 (file)
@@ -2915,7 +2915,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        "failed with error code %d\n", ret);
                goto out;
        }
-       dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
+       dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
        if (!dev) {
                dev_warn(&cgdev->dev,
                        "Activating the CLAW device failed\n");
index 03b6ad035577e28553da16fbf4481d9c249a9e6d..e056dd4fe44d1814d153d215af21fa13f44888a2 100644 (file)
@@ -1137,9 +1137,11 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                return NULL;
 
        if (IS_MPC(priv))
-               dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
+               dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN,
+                                  ctcm_dev_setup);
        else
-               dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
+               dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN,
+                                  ctcm_dev_setup);
 
        if (!dev) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
index ce16d1bdb20a2fb24b3cc557a62c7aba25a8ca58..0a87809c8af7380374b496ab3b90f76a8522ba98 100644 (file)
@@ -2015,7 +2015,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
        struct net_device *dev;
 
        dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
-                          netiucv_setup_netdevice);
+                          NET_NAME_UNKNOWN, netiucv_setup_netdevice);
        if (!dev)
                return NULL;
        rtnl_lock();
index a2088af51cc5d809d7513f0c0cb628c0b81910b6..bbafbd0e017aa8855786b71583bd9593741347a7 100644 (file)
@@ -766,6 +766,11 @@ struct carrier_info {
        __u32 port_speed;
 };
 
+struct qeth_switch_info {
+       __u32 capabilities;
+       __u32 settings;
+};
+
 #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 
 struct qeth_card {
@@ -946,6 +951,8 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 int qeth_mdio_read(struct net_device *, int, int);
 int qeth_snmp_command(struct qeth_card *, char __user *);
 int qeth_query_oat_command(struct qeth_card *, char __user *);
+int qeth_query_switch_attributes(struct qeth_card *card,
+                                 struct qeth_switch_info *sw_info);
 int qeth_query_card_info(struct qeth_card *card,
        struct carrier_info *carrier_info);
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
index f54bec54d677635154f190fc73e26f896d17bd34..71bfacfc097e82ea101d56acb9e90a498ab7f8cf 100644 (file)
@@ -3037,6 +3037,45 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 }
 EXPORT_SYMBOL_GPL(qeth_query_ipassists);
 
+static int qeth_query_switch_attributes_cb(struct qeth_card *card,
+                               struct qeth_reply *reply, unsigned long data)
+{
+       struct qeth_ipa_cmd *cmd;
+       struct qeth_switch_info *sw_info;
+       struct qeth_query_switch_attributes *attrs;
+
+       QETH_CARD_TEXT(card, 2, "qswiatcb");
+       cmd = (struct qeth_ipa_cmd *) data;
+       sw_info = (struct qeth_switch_info *)reply->param;
+       if (cmd->data.setadapterparms.hdr.return_code == 0) {
+               attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+               sw_info->capabilities = attrs->capabilities;
+               sw_info->settings = attrs->settings;
+               QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+                                                       sw_info->settings);
+       }
+       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+
+       return 0;
+}
+
+int qeth_query_switch_attributes(struct qeth_card *card,
+                                struct qeth_switch_info *sw_info)
+{
+       struct qeth_cmd_buffer *iob;
+
+       QETH_CARD_TEXT(card, 2, "qswiattr");
+       if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
+               return -EOPNOTSUPP;
+       if (!netif_carrier_ok(card->dev))
+               return -ENOMEDIUM;
+       iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
+                               sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       return qeth_send_ipa_cmd(card, iob,
+                               qeth_query_switch_attributes_cb, sw_info);
+}
+EXPORT_SYMBOL_GPL(qeth_query_switch_attributes);
+
 static int qeth_query_setdiagass_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
index cf6a90ed42ae3505eedbd12bf9aaec1ade6cfd86..1558be1af72d0ac0b101386a36380e055cf57025 100644 (file)
@@ -242,6 +242,7 @@ enum qeth_ipa_setadp_cmd {
        IPA_SETADP_SET_DIAG_ASSIST              = 0x00002000L,
        IPA_SETADP_SET_ACCESS_CONTROL           = 0x00010000L,
        IPA_SETADP_QUERY_OAT                    = 0x00080000L,
+       IPA_SETADP_QUERY_SWITCH_ATTRIBUTES      = 0x00100000L,
 };
 enum qeth_ipa_mac_ops {
        CHANGE_ADDR_READ_MAC            = 0,
@@ -431,6 +432,21 @@ struct qeth_query_card_info {
        __u32   reserved2;
 };
 
+#define QETH_SWITCH_FORW_802_1         0x00000001
+#define QETH_SWITCH_FORW_REFL_RELAY    0x00000002
+#define QETH_SWITCH_CAP_RTE            0x00000004
+#define QETH_SWITCH_CAP_ECP            0x00000008
+#define QETH_SWITCH_CAP_VDP            0x00000010
+
+struct qeth_query_switch_attributes {
+       __u8  version;
+       __u8  reserved1;
+       __u16 reserved2;
+       __u32 capabilities;
+       __u32 settings;
+       __u8  reserved3[8];
+};
+
 struct qeth_ipacmd_setadpparms_hdr {
        __u32 supp_hw_cmds;
        __u32 reserved1;
@@ -452,6 +468,7 @@ struct qeth_ipacmd_setadpparms {
                struct qeth_set_access_ctrl set_access_ctrl;
                struct qeth_query_oat query_oat;
                struct qeth_query_card_info card_info;
+               struct qeth_query_switch_attributes query_switch_attributes;
                __u32 mode;
        } data;
 } __attribute__ ((packed));
index 8a25a2be9890e7e09af1c9845c5b9b9773472f00..15523f0e4c03666d77d7203102b8f3548cd86e3d 100644 (file)
@@ -543,7 +543,42 @@ out:
 }
 
 static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
-                  qeth_dev_isolation_store);
+                       qeth_dev_isolation_store);
+
+static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct qeth_card *card = dev_get_drvdata(dev);
+       struct qeth_switch_info sw_info;
+       int     rc = 0;
+
+       if (!card)
+               return -EINVAL;
+
+       if (card->state != CARD_STATE_SOFTSETUP && card->state != CARD_STATE_UP)
+               return sprintf(buf, "n/a\n");
+
+       rc = qeth_query_switch_attributes(card, &sw_info);
+       if (rc)
+               return rc;
+
+       if (!sw_info.capabilities)
+               rc = sprintf(buf, "unknown");
+
+       if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
+               rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
+                                                       "[802.1]" : "802.1"));
+       if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
+               rc += sprintf(buf + rc,
+                       (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
+                                                       " [rr]" : " rr"));
+       rc += sprintf(buf + rc, "\n");
+
+       return rc;
+}
+
+static DEVICE_ATTR(switch_attrs, 0444,
+                  qeth_dev_switch_attrs_show, NULL);
 
 static ssize_t qeth_hw_trap_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -728,6 +763,7 @@ static struct attribute *qeth_device_attrs[] = {
        &dev_attr_layer2.attr,
        &dev_attr_isolation.attr,
        &dev_attr_hw_trap.attr,
+       &dev_attr_switch_attrs.attr,
        NULL,
 };
 static struct attribute_group qeth_device_attr_group = {
index 5ef5b4f45758cd226bb58becde5a3fa81ff33bb2..c2679bfe7f6653ba369b8646eada7e554576768a 100644 (file)
@@ -952,10 +952,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 {
        switch (card->info.type) {
        case QETH_CARD_TYPE_IQD:
-               card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+               card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                break;
        case QETH_CARD_TYPE_OSN:
-               card->dev = alloc_netdev(0, "osn%d", ether_setup);
+               card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                card->dev->flags |= IFF_NOARP;
                break;
        default:
index 14e0b5810e8c1cde11a8835552158bab8b59dd1a..f8427a2c4840b9e2453e7a9129068cb69e438397 100644 (file)
@@ -3287,7 +3287,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
-               card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+               card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                if (!card->dev)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
index e8ee5e5fe0efa966c68876c4217b0ff0edba2776..a4a4e98effdd9be2db72d87a5fea3aa9292c97fe 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/tcp.h>
 #include <net/dst.h>
 #include <linux/netdevice.h>
+#include <net/addrconf.h>
 
 #include "t4_regs.h"
 #include "t4_msg.h"
@@ -150,6 +151,7 @@ static struct scsi_transport_template *cxgb4i_stt;
  * The section below implments CPLs that related to iscsi tcp connection
  * open/close/abort and data send/receive.
  */
+
 #define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
 #define RCV_BUFSIZ_MASK                0x3FFU
 #define MAX_IMM_TX_PKT_LEN     128
@@ -179,6 +181,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -248,6 +251,97 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+       pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
+                      (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+                      csk->state, csk->flags, csk->atid, csk->rss_qid);
+
+       cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+
+static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
+                              struct l2t_entry *e)
+{
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       int t4 = is_t4(lldi->adapter_type);
+       int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
+       unsigned long long opt0;
+       unsigned int opt2;
+       unsigned int qid_atid = ((unsigned int)csk->atid) |
+                                (((unsigned int)csk->rss_qid) << 14);
+
+       opt0 = KEEP_ALIVE(1) |
+               WND_SCALE(wscale) |
+               MSS_IDX(csk->mss_idx) |
+               L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
+               TX_CHAN(csk->tx_chan) |
+               SMAC_SEL(csk->smac_idx) |
+               ULP_MODE(ULP_MODE_ISCSI) |
+               RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+
+       opt2 = RX_CHANNEL(0) |
+               RSS_QUEUE_VALID |
+               RX_FC_DISABLE |
+               RSS_QUEUE(csk->rss_qid);
+
+       if (t4) {
+               struct cpl_act_open_req6 *req =
+                           (struct cpl_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                   8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                   8);
+
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= RX_FC_VALID;
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be32(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t));
+       } else {
+               struct cpl_t5_act_open_req6 *req =
+                               (struct cpl_t5_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= T5_OPT_2_VALID;
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t)));
+       }
+
+       set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+       pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
+               t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+               &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
+               &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
+               csk->rss_qid);
+
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
 
@@ -586,9 +680,11 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
-               csk, csk->state, csk->flags, tid, atid, rcv_isn);
+       pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      atid, tid, csk, csk->state, csk->flags, rcv_isn);
+
+       module_put(THIS_MODULE);
 
        cxgbi_sock_get(csk);
        csk->tid = tid;
@@ -663,6 +759,9 @@ static void csk_act_open_retry_timer(unsigned long data)
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
+                                  struct l2t_entry *);
+       int t4 = is_t4(lldi->adapter_type), size, size6;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
@@ -670,20 +769,35 @@ static void csk_act_open_retry_timer(unsigned long data)
 
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
-       skb = alloc_wr(is_t4(lldi->adapter_type) ?
-                               sizeof(struct cpl_act_open_req) :
-                               sizeof(struct cpl_t5_act_open_req),
-                       0, GFP_ATOMIC);
+
+       if (t4) {
+               size = sizeof(struct cpl_act_open_req);
+               size6 = sizeof(struct cpl_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t5_act_open_req);
+               size6 = sizeof(struct cpl_t5_act_open_req6);
+       }
+
+       if (csk->csk_family == AF_INET) {
+               send_act_open_func = send_act_open_req;
+               skb = alloc_wr(size, 0, GFP_ATOMIC);
+       } else {
+               send_act_open_func = send_act_open_req6;
+               skb = alloc_wr(size6, 0, GFP_ATOMIC);
+       }
+
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
-                                       cxgbi_sock_act_open_req_arp_failure);
-               send_act_open_req(csk, skb, csk->l2t);
+                                      cxgbi_sock_act_open_req_arp_failure);
+               send_act_open_func(csk, skb, csk->l2t);
        }
+
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
+
 }
 
 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
@@ -703,10 +817,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
-               &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
-               &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
-               atid, tid, status, csk, csk->state, csk->flags);
+       pr_info_ipaddr("tid %u/%u, status %u.\n"
+                      "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
+                      atid, tid, status, csk, csk->state, csk->flags);
 
        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;
@@ -746,9 +859,9 @@ static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx,%u.\n",
-               csk, csk->state, csk->flags, csk->tid);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
 rel_skb:
        __kfree_skb(skb);
@@ -767,9 +880,9 @@ static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx,%u.\n",
-               csk, csk->state, csk->flags, csk->tid);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
 rel_skb:
        __kfree_skb(skb);
@@ -808,9 +921,9 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
-               csk, csk->state, csk->flags, csk->tid, req->status);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid, req->status);
 
        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE)
@@ -851,10 +964,10 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (!csk)
                goto rel_skb;
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
-               rpl->status, csk, csk ? csk->state : 0,
-               csk ? csk->flags : 0UL);
+       if (csk)
+               pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+                              (&csk->saddr), (&csk->daddr), csk,
+                              csk->state, csk->flags, csk->tid, rpl->status);
 
        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;
@@ -1163,15 +1276,35 @@ static int init_act_open(struct cxgbi_sock *csk)
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
-       struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
-       struct neighbour *n;
+       struct neighbour *n = NULL;
+       void *daddr;
        unsigned int step;
+       unsigned int size, size6;
+       int t4 = is_t4(lldi->adapter_type);
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
 
+       if (csk->csk_family == AF_INET)
+               daddr = &csk->daddr.sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (csk->csk_family == AF_INET6)
+               daddr = &csk->daddr6.sin6_addr;
+#endif
+       else {
+               pr_err("address family 0x%x not supported\n", csk->csk_family);
+               goto rel_resource;
+       }
+
+       n = dst_neigh_lookup(csk->dst, daddr);
+
+       if (!n) {
+               pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+               goto rel_resource;
+       }
+
        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
@@ -1192,10 +1325,19 @@ static int init_act_open(struct cxgbi_sock *csk)
        }
        cxgbi_sock_get(csk);
 
-       skb = alloc_wr(is_t4(lldi->adapter_type) ?
-                               sizeof(struct cpl_act_open_req) :
-                               sizeof(struct cpl_t5_act_open_req),
-                       0, GFP_ATOMIC);
+       if (t4) {
+               size = sizeof(struct cpl_act_open_req);
+               size6 = sizeof(struct cpl_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t5_act_open_req);
+               size6 = sizeof(struct cpl_t5_act_open_req6);
+       }
+
+       if (csk->csk_family == AF_INET)
+               skb = alloc_wr(size, 0, GFP_NOIO);
+       else
+               skb = alloc_wr(size6, 0, GFP_NOIO);
+
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
@@ -1211,19 +1353,27 @@ static int init_act_open(struct cxgbi_sock *csk)
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
-       csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
+       csk->wr_cred = lldi->wr_cred -
+                      DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+       csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
-               csk, pi->port_id, ndev->name, csk->tx_chan,
-               csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
-               csk->smac_idx);
 
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
+                      (&csk->saddr), (&csk->daddr), csk, csk->state,
+                      csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
+                      csk->mtu, csk->mss_idx, csk->smac_idx);
+
+       /* must wait for either a act_open_rpl or act_open_establish */
+       try_module_get(THIS_MODULE);
        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
-       send_act_open_req(csk, skb, csk->l2t);
+       if (csk->csk_family == AF_INET)
+               send_act_open_req(csk, skb, csk->l2t);
+       else
+               send_act_open_req6(csk, skb, csk->l2t);
        neigh_release(n);
+
        return 0;
 
 rel_resource:
@@ -1487,6 +1637,133 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static int cxgbi_inet6addr_handler(struct notifier_block *this,
+                                  unsigned long event, void *data)
+{
+       struct inet6_ifaddr *ifa = data;
+       struct net_device *event_dev = ifa->idev->dev;
+       struct cxgbi_device *cdev;
+       int ret = NOTIFY_DONE;
+
+       rcu_read_lock();
+
+       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+               event_dev = vlan_dev_real_dev(event_dev);
+
+       cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+       if (!cdev) {
+               rcu_read_unlock();
+               return ret;
+       }
+       switch (event) {
+       case NETDEV_UP:
+               ret = cxgb4_clip_get(event_dev,
+                                    (const struct in6_addr *)
+                                    ((ifa)->addr.s6_addr));
+               if (ret < 0) {
+                       rcu_read_unlock();
+                       return ret;
+               }
+               ret = NOTIFY_OK;
+               break;
+
+       case NETDEV_DOWN:
+               cxgb4_clip_release(event_dev,
+                                  (const struct in6_addr *)
+                                  ((ifa)->addr.s6_addr));
+               ret = NOTIFY_OK;
+               break;
+
+       default:
+               break;
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+
+static struct notifier_block cxgbi_inet6addr_notifier = {
+       .notifier_call = cxgbi_inet6addr_handler
+};
+
+/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+       struct inet6_dev *idev = NULL;
+       struct inet6_ifaddr *ifa;
+       int ret = 0;
+
+       idev = __in6_dev_get(root_dev);
+       if (!idev)
+               return ret;
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               pr_info("updating the clip for addr %pI6\n",
+                       ifa->addr.s6_addr);
+               ret = cxgb4_clip_get(dev, (const struct in6_addr *)
+                                    ifa->addr.s6_addr);
+               if (ret < 0)
+                       break;
+       }
+
+       read_unlock_bh(&idev->lock);
+       return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+       struct net_device *root_dev = NULL;
+       int i, ret = 0;
+
+       /* First populate the real net device's IPv6 address */
+       ret = update_dev_clip(dev, dev);
+       if (ret)
+               return ret;
+
+       /* Parse all bond and vlan devices layered on top of the physical dev */
+       root_dev = netdev_master_upper_dev_get(dev);
+       if (root_dev) {
+               ret = update_dev_clip(root_dev, dev);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < VLAN_N_VID; i++) {
+               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+               if (!root_dev)
+                       continue;
+
+               ret = update_dev_clip(root_dev, dev);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+static void cxgbi_update_clip(struct cxgbi_device *cdev)
+{
+       int i;
+
+       rcu_read_lock();
+
+       for (i = 0; i < cdev->nports; i++) {
+               struct net_device *dev = cdev->ports[i];
+               int ret = 0;
+
+               if (dev)
+                       ret = update_root_dev_clip(dev);
+               if (ret < 0)
+                       break;
+       }
+       rcu_read_unlock();
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 {
        struct cxgbi_device *cdev;
@@ -1605,6 +1882,9 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
        switch (state) {
        case CXGB4_STATE_UP:
                pr_info("cdev 0x%p, UP.\n", cdev);
+#if IS_ENABLED(CONFIG_IPV6)
+               cxgbi_update_clip(cdev);
+#endif
                /* re-initialize */
                break;
        case CXGB4_STATE_START_RECOVERY:
@@ -1635,11 +1915,18 @@ static int __init cxgb4i_init_module(void)
        if (rc < 0)
                return rc;
        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+#endif
        return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
+#if IS_ENABLED(CONFIG_IPV6)
+       unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+#endif
        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
index b44c1cff3114bb9a6aa6467540a3755d722ef5b3..3d5322d59f1510a87292e78c2a7cbb6aa983f31b 100644 (file)
 #include <linux/inet.h>
 #include <net/dst.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
 #include <linux/inetdevice.h>  /* ip_dev_find */
 #include <linux/module.h>
 #include <net/tcp.h>
@@ -193,8 +197,8 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
 
-static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
-                                                       int *port)
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
+                                                int *port)
 {
        struct net_device *vdev = NULL;
        struct cxgbi_device *cdev, *tmp;
@@ -224,6 +228,40 @@ static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
                "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
+
+static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
+                                                    int *port)
+{
+       struct net_device *vdev = NULL;
+       struct cxgbi_device *cdev, *tmp;
+       int i;
+
+       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+               vdev = ndev;
+               ndev = vlan_dev_real_dev(ndev);
+               pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+       }
+
+       mutex_lock(&cdev_mutex);
+       list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+               for (i = 0; i < cdev->nports; i++) {
+                       if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
+                                   MAX_ADDR_LEN)) {
+                               cdev->hbas[i]->vdev = vdev;
+                               mutex_unlock(&cdev_mutex);
+                               if (port)
+                                       *port = i;
+                               return cdev;
+                       }
+               }
+       }
+       mutex_unlock(&cdev_mutex);
+       log_debug(1 << CXGBI_DBG_DEV,
+                 "ndev 0x%p, %s, NO match mac found.\n",
+                 ndev, ndev->name);
+       return NULL;
+}
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
@@ -320,6 +358,7 @@ static int sock_get_port(struct cxgbi_sock *csk)
        struct cxgbi_ports_map *pmap = &cdev->pmap;
        unsigned int start;
        int idx;
+       __be16 *port;
 
        if (!pmap->max_connect) {
                pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
@@ -327,9 +366,14 @@ static int sock_get_port(struct cxgbi_sock *csk)
                return -EADDRNOTAVAIL;
        }
 
-       if (csk->saddr.sin_port) {
+       if (csk->csk_family == AF_INET)
+               port = &csk->saddr.sin_port;
+       else /* ipv6 */
+               port = &csk->saddr6.sin6_port;
+
+       if (*port) {
                pr_err("source port NON-ZERO %u.\n",
-                       ntohs(csk->saddr.sin_port));
+                       ntohs(*port));
                return -EADDRINUSE;
        }
 
@@ -347,8 +391,7 @@ static int sock_get_port(struct cxgbi_sock *csk)
                        idx = 0;
                if (!pmap->port_csk[idx]) {
                        pmap->used++;
-                       csk->saddr.sin_port =
-                               htons(pmap->sport_base + idx);
+                       *port = htons(pmap->sport_base + idx);
                        pmap->next = idx;
                        pmap->port_csk[idx] = csk;
                        spin_unlock_bh(&pmap->lock);
@@ -374,16 +417,22 @@ static void sock_put_port(struct cxgbi_sock *csk)
 {
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgbi_ports_map *pmap = &cdev->pmap;
+       __be16 *port;
 
-       if (csk->saddr.sin_port) {
-               int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;
+       if (csk->csk_family == AF_INET)
+               port = &csk->saddr.sin_port;
+       else /* ipv6 */
+               port = &csk->saddr6.sin6_port;
 
-               csk->saddr.sin_port = 0;
+       if (*port) {
+               int idx = ntohs(*port) - pmap->sport_base;
+
+               *port = 0;
                if (idx < 0 || idx >= pmap->max_connect) {
                        pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
                                cdev, csk->port_id,
                                cdev->ports[csk->port_id]->name,
-                               ntohs(csk->saddr.sin_port));
+                               ntohs(*port));
                        return;
                }
 
@@ -479,17 +528,11 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        int port = 0xFFFF;
        int err = 0;
 
-       if (daddr->sin_family != AF_INET) {
-               pr_info("address family 0x%x NOT supported.\n",
-                       daddr->sin_family);
-               err = -EAFNOSUPPORT;
-               goto err_out;
-       }
-
        rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
        if (!rt) {
                pr_info("no route to ipv4 0x%x, port %u.\n",
-                       daddr->sin_addr.s_addr, daddr->sin_port);
+                       be32_to_cpu(daddr->sin_addr.s_addr),
+                       be16_to_cpu(daddr->sin_port));
                err = -ENETUNREACH;
                goto err_out;
        }
@@ -537,9 +580,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        csk->port_id = port;
        csk->mtu = mtu;
        csk->dst = dst;
+
+       csk->csk_family = AF_INET;
        csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
        csk->daddr.sin_port = daddr->sin_port;
        csk->daddr.sin_family = daddr->sin_family;
+       csk->saddr.sin_family = daddr->sin_family;
        csk->saddr.sin_addr.s_addr = fl4.saddr;
        neigh_release(n);
 
@@ -556,6 +602,123 @@ err_out:
        return ERR_PTR(err);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
+                                       const struct in6_addr *daddr)
+{
+       struct flowi6 fl;
+
+       if (saddr)
+               memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
+       if (daddr)
+               memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
+       return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+}
+
+static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
+{
+       struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
+       struct dst_entry *dst;
+       struct net_device *ndev;
+       struct cxgbi_device *cdev;
+       struct rt6_info *rt = NULL;
+       struct neighbour *n;
+       struct in6_addr pref_saddr;
+       struct cxgbi_sock *csk = NULL;
+       unsigned int mtu = 0;
+       int port = 0xFFFF;
+       int err = 0;
+
+       rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
+
+       if (!rt) {
+               pr_info("no route to ipv6 %pI6 port %u\n",
+                       daddr6->sin6_addr.s6_addr,
+                       be16_to_cpu(daddr6->sin6_port));
+               err = -ENETUNREACH;
+               goto err_out;
+       }
+
+       dst = &rt->dst;
+
+       n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
+
+       if (!n) {
+               pr_info("%pI6, port %u, dst no neighbour.\n",
+                       daddr6->sin6_addr.s6_addr,
+                       be16_to_cpu(daddr6->sin6_port));
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+       ndev = n->dev;
+
+       if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+               pr_info("multi-cast route %pI6 port %u, dev %s.\n",
+                       daddr6->sin6_addr.s6_addr,
+                       ntohs(daddr6->sin6_port), ndev->name);
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+
+       cdev = cxgbi_device_find_by_netdev(ndev, &port);
+       if (!cdev)
+               cdev = cxgbi_device_find_by_mac(ndev, &port);
+       if (!cdev) {
+               pr_info("dst %pI6 %s, NOT cxgbi device.\n",
+                       daddr6->sin6_addr.s6_addr, ndev->name);
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+       log_debug(1 << CXGBI_DBG_SOCK,
+                 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
+                 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
+                 ndev->name, cdev);
+
+       csk = cxgbi_sock_create(cdev);
+       if (!csk) {
+               err = -ENOMEM;
+               goto rel_rt;
+       }
+       csk->cdev = cdev;
+       csk->port_id = port;
+       csk->mtu = mtu;
+       csk->dst = dst;
+
+       if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+               struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
+
+               err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
+                                        &daddr6->sin6_addr, 0, &pref_saddr);
+               if (err) {
+                       pr_info("failed to get source address to reach %pI6\n",
+                               &daddr6->sin6_addr);
+                       goto rel_rt;
+               }
+       } else {
+               pref_saddr = rt->rt6i_prefsrc.addr;
+       }
+
+       csk->csk_family = AF_INET6;
+       csk->daddr6.sin6_addr = daddr6->sin6_addr;
+       csk->daddr6.sin6_port = daddr6->sin6_port;
+       csk->daddr6.sin6_family = daddr6->sin6_family;
+       csk->saddr6.sin6_addr = pref_saddr;
+
+       neigh_release(n);
+       return csk;
+
+rel_rt:
+       if (n)
+               neigh_release(n);
+
+       ip6_rt_put(rt);
+       if (csk)
+               cxgbi_sock_closed(csk);
+err_out:
+       return ERR_PTR(err);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
                        unsigned int opt)
 {
@@ -2194,6 +2357,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 }
 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
 
+static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
+{
+       int len;
+
+       cxgbi_sock_get(csk);
+       len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
+       cxgbi_sock_put(csk);
+
+       return len;
+}
+
+static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
+{
+       int len;
+
+       cxgbi_sock_get(csk);
+       if (csk->csk_family == AF_INET)
+               len = sprintf(buf, "%pI4",
+                             &csk->daddr.sin_addr.s_addr);
+       else
+               len = sprintf(buf, "%pI6",
+                             &csk->daddr6.sin6_addr);
+
+       cxgbi_sock_put(csk);
+
+       return len;
+}
+
 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
                       char *buf)
 {
@@ -2447,7 +2638,19 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
                }
        }
 
-       csk = cxgbi_check_route(dst_addr);
+       if (dst_addr->sa_family == AF_INET) {
+               csk = cxgbi_check_route(dst_addr);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (dst_addr->sa_family == AF_INET6) {
+               csk = cxgbi_check_route6(dst_addr);
+#endif
+       } else {
+               pr_info("address family 0x%x NOT supported.\n",
+                       dst_addr->sa_family);
+               err = -EAFNOSUPPORT;
+               return (struct iscsi_endpoint *)ERR_PTR(err);
+       }
+
        if (IS_ERR(csk))
                return (struct iscsi_endpoint *)csk;
        cxgbi_sock_get(csk);
index 8135f04671af3d679400a3d72396aebadd5dc96e..8ad73d913f0241b3470afc62d68b660d462c49df 100644 (file)
@@ -44,6 +44,15 @@ enum cxgbi_dbg_flag {
                        pr_info(fmt, ##__VA_ARGS__); \
        } while (0)
 
+#define pr_info_ipaddr(fmt_trail,                                      \
+                       addr1, addr2, args_trail...)                    \
+do {                                                                   \
+       if (!((1 << CXGBI_DBG_SOCK) & dbg_level))                       \
+               break;                                                  \
+       pr_info("%pISpc - %pISpc, " fmt_trail,                          \
+               addr1, addr2, args_trail);                              \
+} while (0)
+
 /* max. connections per adapter */
 #define CXGBI_MAX_CONN         16384
 
@@ -202,8 +211,15 @@ struct cxgbi_sock {
        spinlock_t lock;
        struct kref refcnt;
        unsigned int state;
-       struct sockaddr_in saddr;
-       struct sockaddr_in daddr;
+       unsigned int csk_family;
+       union {
+               struct sockaddr_in saddr;
+               struct sockaddr_in6 saddr6;
+       };
+       union {
+               struct sockaddr_in daddr;
+               struct sockaddr_in6 daddr6;
+       };
        struct dst_entry *dst;
        struct sk_buff_head receive_queue;
        struct sk_buff_head write_queue;
@@ -692,6 +708,7 @@ struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
 void cxgbi_device_unregister(struct cxgbi_device *);
 void cxgbi_device_unregister_all(unsigned int flag);
 struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
 int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int,
                        struct scsi_host_template *,
                        struct scsi_transport_template *);
index 09f3d5ca75ac6737b9fc3efbff0243d28cc13bb6..85d776bbfb15896245dc2880d12fd26589b6b8bd 100644 (file)
@@ -917,7 +917,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
        struct net_device *ndev;
        ci_t       *ci;
 
-       ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
+       ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, NET_NAME_UNKNOWN,
+                           c4_setup);
        if (!ndev) {
                pr_warning("%s: no memory for struct net_device !\n",
                           hi->devname);
index 64c55b99fda4594725112d0d17b299ffe78bdbfb..c2268527422f417487b8580cf301ace6e44b849c 100644 (file)
@@ -885,7 +885,7 @@ int register_lte_device(struct phy_dev *phy_dev,
 
                /* Allocate netdev */
                net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
-                               ether_setup);
+                                  NET_NAME_UNKNOWN, ether_setup);
                if (net == NULL) {
                        pr_err("alloc_netdev failed\n");
                        ret = -ENOMEM;
index e5e511585122a3a25c5d6ad203d4c5f185755f3f..a9a6fc51024bbabb109812a3d16ea29f23b7cec6 100644 (file)
@@ -886,7 +886,8 @@ int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev)
        struct net_device *dev;
        int ret;
 
-       dev = alloc_netdev(sizeof(*nic), "wm%d", ether_setup);
+       dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN,
+                          ether_setup);
 
        if (dev == NULL) {
                pr_err("alloc_etherdev failed\n");
index 8392d4d1d5ed75e83ff60123bc66f9f17132b016..0814bfd68b2e726067a53588a1bd558f78951afe 100644 (file)
@@ -89,7 +89,8 @@ static int wpa_init_wpadev(PSDevice pDevice)
        struct net_device *dev = pDevice->dev;
        int ret = 0;
 
-       pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
+       pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa",
+                                      NET_NAME_UNKNOWN, wpadev_setup);
        if (pDevice->wpadev == NULL)
                return -ENOMEM;
 
index 00b186c59725d2888c0b3b649c94d0397bfc6229..6c78f917e24acd270f17097c1432fa83dcc6e94b 100644 (file)
@@ -769,7 +769,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
 
        /* Allocate and initialize the struct device */
        netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
-                               ether_setup);
+                             NET_NAME_UNKNOWN, ether_setup);
        if (netdev == NULL) {
                dev_err(physdev, "Failed to alloc netdev.\n");
                wlan_free_wiphy(wiphy);
index 2ebe47b78a3e3ba48093d46164bee6247a32295e..cde3ab97900f6c804b6779ca6d58b2546be5201c 100644 (file)
@@ -2789,9 +2789,8 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
        netname = "gsm%d";
        if (nc->if_name[0] != '\0')
                netname = nc->if_name;
-       net = alloc_netdev(sizeof(struct gsm_mux_net),
-                       netname,
-                       gsm_mux_net_init);
+       net = alloc_netdev(sizeof(struct gsm_mux_net), netname,
+                          NET_NAME_UNKNOWN, gsm_mux_net_init);
        if (!net) {
                pr_err("alloc_netdev failed");
                return -ENOMEM;
index f2b781773eed39da911bd7a6a4f4f5500e46fbbf..b9cfc1571d71281c0607abf0420fb518796626fe 100644 (file)
@@ -721,7 +721,8 @@ struct net_device *gphonet_setup_default(void)
        struct phonet_port *port;
 
        /* Create net device */
-       dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
+       dev = alloc_netdev(sizeof(*port), "upnlink%d", NET_NAME_UNKNOWN,
+                          pn_net_setup);
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
index 7216b0daf544a35891c95492b1f9bbea7da65447..df0356220730601bd86fb644c2da629c5ddb1a35 100644 (file)
 #ifdef __KERNEL__
 #include  <linux/irqreturn.h>
 
-#ifndef bool
-#define bool int
-#endif
-
 /*
  * RECON_THRESHOLD is the maximum number of RECON messages to receive
  * within one minute before printing a "cabling problem" warning. The
@@ -285,9 +281,9 @@ struct arcnet_local {
        unsigned long first_recon; /* time of "first" RECON message to count */
        unsigned long last_recon;  /* time of most recent RECON */
        int num_recons;         /* number of RECONs between first and last. */
-       bool network_down;      /* do we think the network is down? */
+       int network_down;       /* do we think the network is down? */
 
-       bool excnak_pending;    /* We just got an excesive nak interrupt */
+       int excnak_pending;    /* We just got an excesive nak interrupt */
 
        struct {
                uint16_t sequence;      /* sequence number (incs with each packet) */
@@ -305,7 +301,7 @@ struct arcnet_local {
                void (*command) (struct net_device * dev, int cmd);
                int (*status) (struct net_device * dev);
                void (*intmask) (struct net_device * dev, int mask);
-               bool (*reset) (struct net_device * dev, bool really_reset);
+               int (*reset) (struct net_device * dev, int really_reset);
                void (*open) (struct net_device * dev);
                void (*close) (struct net_device * dev);
 
index 0b3bb16c705a2eadc841d56bf177a900e7a8166e..452286a38b2b9f751600fb2d96a9dc13bafcd36a 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/bcma/bcma_driver_chipcommon.h>
 #include <linux/bcma/bcma_driver_pci.h>
+#include <linux/bcma/bcma_driver_pcie2.h>
 #include <linux/bcma/bcma_driver_mips.h>
 #include <linux/bcma/bcma_driver_gmac_cmn.h>
 #include <linux/ssb/ssb.h> /* SPROM sharing */
@@ -333,6 +334,7 @@ struct bcma_bus {
 
        struct bcma_drv_cc drv_cc;
        struct bcma_drv_pci drv_pci[2];
+       struct bcma_drv_pcie2 drv_pcie2;
        struct bcma_drv_mips drv_mips;
        struct bcma_drv_gmac_cmn drv_gmac_cmn;
 
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
new file mode 100644 (file)
index 0000000..5988b05
--- /dev/null
@@ -0,0 +1,158 @@
+#ifndef LINUX_BCMA_DRIVER_PCIE2_H_
+#define LINUX_BCMA_DRIVER_PCIE2_H_
+
+#define BCMA_CORE_PCIE2_CLK_CONTROL            0x0000
+#define  PCIE2_CLKC_RST_OE                     0x0001 /* When set, drives PCI_RESET out to pin */
+#define  PCIE2_CLKC_RST                                0x0002 /* Value driven out to pin */
+#define  PCIE2_CLKC_SPERST                     0x0004 /* SurvivePeRst */
+#define  PCIE2_CLKC_DISABLE_L1CLK_GATING       0x0010
+#define  PCIE2_CLKC_DLYPERST                   0x0100 /* Delay PeRst to CoE Core */
+#define  PCIE2_CLKC_DISSPROMLD                 0x0200 /* DisableSpromLoadOnPerst */
+#define  PCIE2_CLKC_WAKE_MODE_L2               0x1000 /* Wake on L2 */
+#define BCMA_CORE_PCIE2_RC_PM_CONTROL          0x0004
+#define BCMA_CORE_PCIE2_RC_PM_STATUS           0x0008
+#define BCMA_CORE_PCIE2_EP_PM_CONTROL          0x000C
+#define BCMA_CORE_PCIE2_EP_PM_STATUS           0x0010
+#define BCMA_CORE_PCIE2_EP_LTR_CONTROL         0x0014
+#define BCMA_CORE_PCIE2_EP_LTR_STATUS          0x0018
+#define BCMA_CORE_PCIE2_EP_OBFF_STATUS         0x001C
+#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS                0x0020
+#define BCMA_CORE_PCIE2_RC_AXI_CONFIG          0x0100
+#define BCMA_CORE_PCIE2_EP_AXI_CONFIG          0x0104
+#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0                0x0108
+#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0       0x010C
+#define BCMA_CORE_PCIE2_CONFIGINDADDR          0x0120
+#define BCMA_CORE_PCIE2_CONFIGINDDATA          0x0124
+#define BCMA_CORE_PCIE2_MDIOCONTROL            0x0128
+#define BCMA_CORE_PCIE2_MDIOWRDATA             0x012C
+#define BCMA_CORE_PCIE2_MDIORDDATA             0x0130
+#define BCMA_CORE_PCIE2_DATAINTF               0x0180
+#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0         0x0188
+#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0         0x018c
+#define BCMA_CORE_PCIE2_H2D_INTSTAT_0          0x0190
+#define BCMA_CORE_PCIE2_H2D_INTMASK_0          0x0194
+#define BCMA_CORE_PCIE2_D2H_INTSTAT_0          0x0198
+#define BCMA_CORE_PCIE2_D2H_INTMASK_0          0x019c
+#define BCMA_CORE_PCIE2_LTR_STATE              0x01A0 /* Latency Tolerance Reporting */
+#define  PCIE2_LTR_ACTIVE                      2
+#define  PCIE2_LTR_ACTIVE_IDLE                 1
+#define  PCIE2_LTR_SLEEP                       0
+#define  PCIE2_LTR_FINAL_MASK                  0x300
+#define  PCIE2_LTR_FINAL_SHIFT                 8
+#define BCMA_CORE_PCIE2_PWR_INT_STATUS         0x01A4
+#define BCMA_CORE_PCIE2_PWR_INT_MASK           0x01A8
+#define BCMA_CORE_PCIE2_CFG_ADDR               0x01F8
+#define BCMA_CORE_PCIE2_CFG_DATA               0x01FC
+#define BCMA_CORE_PCIE2_SYS_EQ_PAGE            0x0200
+#define BCMA_CORE_PCIE2_SYS_MSI_PAGE           0x0204
+#define BCMA_CORE_PCIE2_SYS_MSI_INTREN         0x0208
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0          0x0210
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1          0x0214
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2          0x0218
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3          0x021C
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4          0x0220
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5          0x0224
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0           0x0250
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0           0x0254
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1           0x0258
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1           0x025C
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2           0x0260
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2           0x0264
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3           0x0268
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3           0x026C
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4           0x0270
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4           0x0274
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5           0x0278
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5           0x027C
+#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN         0x0330
+#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR                0x0334
+#define BCMA_CORE_PCIE2_SYS_MSI_REQ            0x0340
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN       0x0344
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR      0x0348
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR0         0x0350
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR1         0x0354
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR2         0x0358
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR3         0x035C
+#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0         0x0360
+#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1         0x0364
+#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0                0x0370
+#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1                0x0374
+#define BCMA_CORE_PCIE2_SPROM(wordoffset)      (0x0800 + ((wordoffset) * 2))
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0          0x0C00
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1          0x0C04
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2          0x0C08
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3          0x0C0C
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4          0x0C10
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5          0x0C14
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6          0x0C18
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7          0x0C1C
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0          0x0C20
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1          0x0C24
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2          0x0C28
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3          0x0C2C
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4          0x0C30
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5          0x0C34
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6          0x0C38
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7          0x0C3C
+#define BCMA_CORE_PCIE2_FUNC0_IMAP1            0x0C80
+#define BCMA_CORE_PCIE2_FUNC1_IMAP1            0x0C88
+#define BCMA_CORE_PCIE2_FUNC0_IMAP2            0x0CC0
+#define BCMA_CORE_PCIE2_FUNC1_IMAP2            0x0CC8
+#define BCMA_CORE_PCIE2_IARR0_LOWER            0x0D00
+#define BCMA_CORE_PCIE2_IARR0_UPPER            0x0D04
+#define BCMA_CORE_PCIE2_IARR1_LOWER            0x0D08
+#define BCMA_CORE_PCIE2_IARR1_UPPER            0x0D0C
+#define BCMA_CORE_PCIE2_IARR2_LOWER            0x0D10
+#define BCMA_CORE_PCIE2_IARR2_UPPER            0x0D14
+#define BCMA_CORE_PCIE2_OARR0                  0x0D20
+#define BCMA_CORE_PCIE2_OARR1                  0x0D28
+#define BCMA_CORE_PCIE2_OARR2                  0x0D30
+#define BCMA_CORE_PCIE2_OMAP0_LOWER            0x0D40
+#define BCMA_CORE_PCIE2_OMAP0_UPPER            0x0D44
+#define BCMA_CORE_PCIE2_OMAP1_LOWER            0x0D48
+#define BCMA_CORE_PCIE2_OMAP1_UPPER            0x0D4C
+#define BCMA_CORE_PCIE2_OMAP2_LOWER            0x0D50
+#define BCMA_CORE_PCIE2_OMAP2_UPPER            0x0D54
+#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE       0x0D58
+#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE       0x0D5C
+#define BCMA_CORE_PCIE2_MEM_CONTROL            0x0F00
+#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0                0x0F04
+#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1                0x0F08
+#define BCMA_CORE_PCIE2_LINK_STATUS            0x0F0C
+#define BCMA_CORE_PCIE2_STRAP_STATUS           0x0F10
+#define BCMA_CORE_PCIE2_RESET_STATUS           0x0F14
+#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN    0x0F18
+#define BCMA_CORE_PCIE2_MISC_INTR_EN           0x0F1C
+#define BCMA_CORE_PCIE2_TX_DEBUG_CFG           0x0F20
+#define BCMA_CORE_PCIE2_MISC_CONFIG            0x0F24
+#define BCMA_CORE_PCIE2_MISC_STATUS            0x0F28
+#define BCMA_CORE_PCIE2_INTR_EN                        0x0F30
+#define BCMA_CORE_PCIE2_INTR_CLEAR             0x0F34
+#define BCMA_CORE_PCIE2_INTR_STATUS            0x0F38
+
+/* PCIE gen2 config regs */
+#define PCIE2_INTSTATUS                                0x090
+#define PCIE2_INTMASK                          0x094
+#define PCIE2_SBMBX                            0x098
+
+#define PCIE2_PMCR_REFUP                       0x1814 /* Trefup time */
+
+#define PCIE2_CAP_DEVSTSCTRL2_OFFSET           0xD4
+#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB          0x400
+#define PCIE2_PVT_REG_PM_CLK_PERIOD            0x184c
+
+struct bcma_drv_pcie2 {
+       struct bcma_device *core;
+};
+
+#define pcie2_read16(pcie2, offset)            bcma_read16((pcie2)->core, offset)
+#define pcie2_read32(pcie2, offset)            bcma_read32((pcie2)->core, offset)
+#define pcie2_write16(pcie2, offset, val)      bcma_write16((pcie2)->core, offset, val)
+#define pcie2_write32(pcie2, offset, val)      bcma_write32((pcie2)->core, offset, val)
+
+#define pcie2_set32(pcie2, offset, set)                bcma_set32((pcie2)->core, offset, set)
+#define pcie2_mask32(pcie2, offset, mask)      bcma_mask32((pcie2)->core, offset, mask)
+
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+
+#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
index 7d275c4fc01108f508edc2da38a6c60500cd331c..9e8a032c17887b6b9190e7d85fdf4a85c4c07904 100644 (file)
@@ -8,8 +8,8 @@
 #include <linux/types.h>
 #include <linux/bitrev.h>
 
-extern u32  crc32_le(u32 crc, unsigned char const *p, size_t len);
-extern u32  crc32_be(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
 
 /**
  * crc32_le_combine - Combine two crc32 check values into one. For two
@@ -29,9 +29,14 @@ extern u32  crc32_be(u32 crc, unsigned char const *p, size_t len);
  *        with the same initializer as crc1, and crc2 seed was 0. See
  *        also crc32_combine_test().
  */
-extern u32  crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
 
-extern u32  __crc32c_le(u32 crc, unsigned char const *p, size_t len);
+static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+       return crc32_le_shift(crc1, len2) ^ crc2;
+}
+
+u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
 
 /**
  * __crc32c_le_combine - Combine two crc32c check values into one. For two
@@ -51,7 +56,12 @@ extern u32  __crc32c_le(u32 crc, unsigned char const *p, size_t len);
  *        seeded with the same initializer as crc1, and crc2 seed
  *        was 0. See also crc32c_combine_test().
  */
-extern u32  __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+
+static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+       return __crc32c_le_shift(crc1, len2) ^ crc2;
+}
 
 #define crc32(seed, data, length)  crc32_le(seed, (unsigned char const *)(data), length)
 
index a7e3c48d73a70677f88fcbf68c89e62f1caf159f..c43c8258e682c5f1d55539c867523264ca8eb293 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
@@ -361,7 +362,7 @@ void sk_unattached_filter_destroy(struct sk_filter *fp);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
 
-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
+int sk_chk_filter(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);
 
@@ -406,6 +407,18 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
        }
 }
 
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+                                          int k, unsigned int size);
+
+static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
+                                    unsigned int size, void *buffer)
+{
+       if (k >= 0)
+               return skb_header_pointer(skb, k, size, buffer);
+
+       return bpf_internal_load_pointer_neg_helper(skb, k, size);
+}
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
index 6bff13f740505090eb53be8b4d91e0fe805a5191..75d17e15da338c1bcb05d1b27283ea577050b3aa 100644 (file)
@@ -1621,6 +1621,9 @@ enum ieee80211_reasoncode {
        WLAN_REASON_INVALID_RSN_IE_CAP = 22,
        WLAN_REASON_IEEE8021X_FAILED = 23,
        WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
+       /* TDLS (802.11z) */
+       WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25,
+       WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26,
        /* 802.11e */
        WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32,
        WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33,
index fd22789d7b2ed3e8003829d6da9be5e57d21a74c..808dcb8cc04fbeee82f00bab944ff45e8087078a 100644 (file)
@@ -36,8 +36,28 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
+bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+#else
+static inline int br_multicast_list_adjacent(struct net_device *dev,
+                                            struct list_head *br_ip_list)
+{
+       return 0;
+}
+static inline bool br_multicast_has_querier_anywhere(struct net_device *dev,
+                                                    int proto)
+{
+       return false;
+}
+static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
+                                                    int proto)
+{
+       return false;
+}
+#endif
 
 #endif
index 2faef339d8f2bc2f0b9a4cc08446f21ea7879d50..ff560537dd61b334a3dfbfb559dfa0f1cff20f41 100644 (file)
@@ -39,6 +39,7 @@ struct ipv6_devconf {
 #endif
        __s32           proxy_ndp;
        __s32           accept_source_route;
+       __s32           accept_ra_from_local;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        __s32           optimistic_dad;
 #endif
@@ -193,12 +194,13 @@ struct ipv6_pinfo {
                                sndflow:1,
                                repflow:1,
                                pmtudisc:3,
-                               ipv6only:1,
+                               padding:1,      /* 1 bit hole */
                                srcprefs:3,     /* 001: prefer temporary address
                                                 * 010: prefer public address
                                                 * 100: prefer care-of address
                                                 */
-                               dontfrag:1;
+                               dontfrag:1,
+                               autoflowlabel:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
@@ -256,16 +258,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
        return inet_sk(__sk)->pinet6;
 }
 
-static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
-{
-       struct request_sock *req = reqsk_alloc(ops);
-
-       if (req)
-               inet_rsk(req)->pktopts = NULL;
-
-       return req;
-}
-
 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 {
        return (struct raw6_sock *)sk;
@@ -282,8 +274,8 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
        __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
 }
 
-#define __ipv6_only_sock(sk)   (inet6_sk(sk)->ipv6only)
-#define ipv6_only_sock(sk)     ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
+#define __ipv6_only_sock(sk)   (sk->sk_ipv6only)
+#define ipv6_only_sock(sk)     (__ipv6_only_sock(sk))
 #define ipv6_sk_rxinfo(sk)     ((sk)->sk_family == PF_INET6 && \
                                 inet6_sk(sk)->rxopt.bits.rxinfo)
 
@@ -296,8 +288,8 @@ static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
 
 static inline int inet_v6_ipv6only(const struct sock *sk)
 {
-       return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only;
+       /* ipv6only field is at same position for timewait and other sockets */
+       return ipv6_only_sock(sk);
 }
 #else
 #define __ipv6_only_sock(sk)   0
index 4c52907a6d8b54d41fbd63cc1839f159c0ed5207..a9e2268ecccb0c0f96a02add9b7f122421f223b6 100644 (file)
@@ -501,7 +501,7 @@ static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 extern int hex_to_bin(char ch);
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 
-int mac_pton(const char *s, u8 *mac);
+bool mac_pton(const char *s, u8 *mac);
 
 /*
  * General tracing related utility functions - trace_printk(),
index 35b51e7af88659f5c1f4e11ff84bc80c3420f6a1..fa660aedb822f05b2ba1068ca8499b51146dd3a7 100644 (file)
 #define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
 
+#define MLX4_NUM_UP                    8
+#define MLX4_NUM_TC                    8
+#define MLX4_MAX_100M_UNITS_VAL                255     /*
+                                                * work around: can't set values
+                                                * greater then this value when
+                                                * using 100 Mbps units.
+                                                */
+#define MLX4_RATELIMIT_100M_UNITS      3       /* 100 Mbps */
+#define MLX4_RATELIMIT_1G_UNITS                4       /* 1 Gbps */
+#define MLX4_RATELIMIT_DEFAULT         0x00ff
+
 #define MLX4_ROCE_MAX_GIDS     128
 #define MLX4_ROCE_PF_GIDS      16
 
index 66f9a04ec27041445afddbb70729b039719387c5..8e8fb3ed574b43d32867acbaa5b7f218b24c17d7 100644 (file)
@@ -943,7 +943,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *                   const unsigned char *addr)
  *     Deletes the FDB entry from dev coresponding to addr.
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
- *                    struct net_device *dev, int idx)
+ *                    struct net_device *dev, struct net_device *filter_dev,
+ *                    int idx)
  *     Used to add FDB entries to dump requests. Implementers should add
  *     entries to skb and update idx with the number of entries.
  *
@@ -1114,6 +1115,7 @@ struct net_device_ops {
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
+                                               struct net_device *filter_dev,
                                                int idx);
 
        int                     (*ndo_bridge_setlink)(struct net_device *dev,
@@ -1379,6 +1381,8 @@ struct net_device {
        struct kset             *queues_kset;
 #endif
 
+       unsigned char           name_assign_type;
+
        bool                    uc_promisc;
        unsigned int            promiscuity;
        unsigned int            allmulti;
@@ -2486,7 +2490,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
  * as a distribution range limit for the returned value.
  */
 static inline u16 skb_tx_hash(const struct net_device *dev,
-                             const struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 }
@@ -2987,13 +2991,15 @@ void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+                                   unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
-#define alloc_netdev(sizeof_priv, name, setup) \
-       alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+       alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
-#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
-       alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
+       alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
+                        count)
 
 int register_netdev(struct net_device *dev);
 void unregister_netdev(struct net_device *dev);
@@ -3377,11 +3383,26 @@ extern struct pernet_operations __net_initdata loopback_net_ops;
 
 static inline const char *netdev_name(const struct net_device *dev)
 {
-       if (dev->reg_state != NETREG_REGISTERED)
-               return "(unregistered net_device)";
+       if (!dev->name[0] || strchr(dev->name, '%'))
+               return "(unnamed net_device)";
        return dev->name;
 }
 
+static inline const char *netdev_reg_state(const struct net_device *dev)
+{
+       switch (dev->reg_state) {
+       case NETREG_UNINITIALIZED: return " (uninitialized)";
+       case NETREG_REGISTERED: return "";
+       case NETREG_UNREGISTERING: return " (unregistering)";
+       case NETREG_UNREGISTERED: return " (unregistered)";
+       case NETREG_RELEASED: return " (released)";
+       case NETREG_DUMMY: return " (dummy)";
+       }
+
+       WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+       return " (unknown)";
+}
+
 __printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
@@ -3438,7 +3459,8 @@ do {                                                              \
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)                      \
-       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
+       WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),   \
+            netdev_reg_state(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
 
index 7dfed71d76a6e9823b75a5b47f353543a234cd36..159c987b1853106fc13e36391f8534383c947d46 100644 (file)
@@ -33,8 +33,8 @@
 #define PTP_CLASS_IPV4  0x10 /* event in an IPV4 UDP packet */
 #define PTP_CLASS_IPV6  0x20 /* event in an IPV6 UDP packet */
 #define PTP_CLASS_L2    0x30 /* event in a L2 packet */
-#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged L2 packet */
-#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
+#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */
+#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged packet */
 
 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
 #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
@@ -54,7 +54,6 @@
 #define IP6_HLEN       40
 #define UDP_HLEN       8
 #define OFF_IHL                14
-#define OFF_PTP6       (ETH_HLEN + IP6_HLEN + UDP_HLEN)
 #define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
 
 #if defined(CONFIG_NET_PTP_CLASSIFY)
index 0c8dc7195cdb290debd000a0b4837ac2baeab85c..93c0a64aefa6c4f2ebe8431904c1b5c74b5ffcff 100644 (file)
@@ -65,6 +65,7 @@
 #define        RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION  0x40010012
 #define RNDIS_STATUS_WW_INDICATION             RDIA_SPECIFIC_INDICATION
 #define RNDIS_STATUS_LINK_SPEED_CHANGE         0x40010013L
+#define RNDIS_STATUS_NETWORK_CHANGE            0x40010018
 
 #define RNDIS_STATUS_NOT_RESETTABLE            0x80010001
 #define RNDIS_STATUS_SOFT_ERRORS               0x80010003
index 953937ea5233c770d631bf3ae59be60b72630d88..167bae7bdfa45cc255cdf9c22b986c1fffe8f9d5 100644 (file)
@@ -78,6 +78,7 @@ extern void __rtnl_unlock(void);
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
                             struct net_device *dev,
+                            struct net_device *filter_dev,
                             int idx);
 extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
                            struct nlattr *tb[],
index ec89301ada418aff749bf924da1b7f63040ac048..369430340ed9ef6869406e30f54206f5c9c6f50a 100644 (file)
@@ -211,7 +211,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  * struct skb_shared_hwtstamps - hardware time stamps
  * @hwtstamp:  hardware time stamp transformed into duration
  *             since arbitrary point in time
- * @syststamp: hwtstamp transformed to system time base
+ * @syststamp: hwtstamp transformed to system time base (deprecated)
  *
  * Software time stamps generated by ktime_get_real() are stored in
  * skb->tstamp. The relation between the different kinds of time
@@ -222,7 +222,9 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  * syststamp/tstamp/"syststamp from other device" comparison is
  * limited by the accuracy of the transformation into system time
  * base. This depends on the device driver and its underlying
- * hardware.
+ * hardware. The syststamp implementation is deprecated in favor
+ * of hwtstamps and hw PTP clock sources exposed directly to
+ * userspace.
  *
  * hwtstamps can only be compared against other hwtstamps from
  * the same device.
@@ -455,6 +457,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
  *             ports.
+ *     @sw_hash: indicates hash was computed in software stack
  *     @wifi_acked_valid: wifi_acked was set
  *     @wifi_acked: whether frame was acked on wifi or not
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
@@ -562,6 +565,7 @@ struct sk_buff {
        __u8                    pfmemalloc:1;
        __u8                    ooo_okay:1;
        __u8                    l4_hash:1;
+       __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
        __u8                    no_fcs:1;
@@ -575,7 +579,7 @@ struct sk_buff {
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
        __u8                    csum_complete_sw:1;
-       /* 3/5 bit hole (depending on ndisc_nodetype presence) */
+       /* 2/4 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -830,13 +834,14 @@ static inline void
 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 {
        skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+       skb->sw_hash = 0;
        skb->hash = hash;
 }
 
 void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
-       if (!skb->l4_hash)
+       if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);
 
        return skb->hash;
@@ -850,6 +855,7 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
        skb->hash = 0;
+       skb->sw_hash = 0;
        skb->l4_hash = 0;
 }
 
@@ -862,6 +868,7 @@ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
        to->hash = from->hash;
+       to->sw_hash = from->sw_hash;
        to->l4_hash = from->l4_hash;
 };
 
@@ -3005,7 +3012,7 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues);
 
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
new file mode 100644 (file)
index 0000000..85b8ee6
--- /dev/null
@@ -0,0 +1,26 @@
+/* Header file for cc2520 radio driver
+ *
+ * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
+ *                    Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
+ *                    P Sowjanya <sowjanyap@cdac.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __CC2520_H
+#define __CC2520_H
+
+struct cc2520_platform_data {
+       int fifo;
+       int fifop;
+       int cca;
+       int sfd;
+       int reset;
+       int vreg;
+};
+
+#endif
index a0513210798fc9027af01dccccc5ff6c677d3d7e..fa5258f322e7998664a257e2d17f8397cd4346b2 100644 (file)
@@ -111,10 +111,7 @@ struct tcp_request_sock_ops;
 
 struct tcp_request_sock {
        struct inet_request_sock        req;
-#ifdef CONFIG_TCP_MD5SIG
-       /* Only used by TCP MD5 Signature so far. */
        const struct tcp_request_sock_ops *af_specific;
-#endif
        struct sock                     *listener; /* needed for TFO */
        u32                             rcv_isn;
        u32                             snt_isn;
index 904777c1cd2420486a3df25636f61811ab35c29e..373000de610d4d3077f289a2f7a1967910ef09e3 100644 (file)
@@ -260,15 +260,15 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
 
 /* Skb helpers */
 struct l2cap_ctrl {
-       unsigned int    sframe:1,
-                       poll:1,
-                       final:1,
-                       fcs:1,
-                       sar:2,
-                       super:2;
-       __u16           reqseq;
-       __u16           txseq;
-       __u8            retries;
+       __u8    sframe:1,
+               poll:1,
+               final:1,
+               fcs:1,
+               sar:2,
+               super:2;
+       __u16   reqseq;
+       __u16   txseq;
+       __u8    retries;
 };
 
 struct hci_dev;
index 16587dcd6a9181b74814f1a4d0865332aa1bd81d..a01236e2df1327673946899f0a93f22e2672195f 100644 (file)
 
 /* HCI device quirks */
 enum {
+       /* When this quirk is set, the HCI Reset command is sent when
+        * closing the transport instead of when opening it.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
        HCI_QUIRK_RESET_ON_CLOSE,
+
+       /* When this quirk is set, the device is turned into a raw-only
+        * device and it will stay in unconfigured state.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
        HCI_QUIRK_RAW_DEVICE,
+
+       /* When this quirk is set, the buffer sizes reported by
+        * HCI Read Buffer Size command are corrected if invalid.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
        HCI_QUIRK_FIXUP_BUFFER_SIZE,
+
+       /* When this quirk is set, then no stored link key handling
+        * is performed. This is mainly due to the fact that the
+        * HCI Delete Stored Link Key command is advertised, but
+        * not supported.
+        *
+        * This quirk must be set before hci_register_dev is called.
+        */
        HCI_QUIRK_BROKEN_STORED_LINK_KEY,
+
+       /* When this quirk is set, an external configuration step
+        * is required and will be indicated with the controller
+        * configuration.
+        *
+        * This quirk can be set before hci_register_dev is called or
+        * during the hdev->setup vendor callback.
+        */
+       HCI_QUIRK_EXTERNAL_CONFIG,
+
+       /* When this quirk is set, the public Bluetooth address
+        * initially reported by HCI Read BD Address command
+        * is considered invalid. Controller configuration is
+        * required before this device can be used.
+        *
+        * This quirk can be set before hci_register_dev is called or
+        * during the hdev->setup vendor callback.
+        */
+       HCI_QUIRK_INVALID_BDADDR,
 };
 
 /* HCI device flags */
@@ -104,24 +148,34 @@ enum {
        HCI_RESET,
 };
 
+/* BR/EDR and/or LE controller flags: the flags defined here should represent
+ * states configured via debugfs for debugging and testing purposes only.
+ */
+enum {
+       HCI_DUT_MODE,
+       HCI_FORCE_SC,
+       HCI_FORCE_STATIC_ADDR,
+};
+
 /*
  * BR/EDR and/or LE controller flags: the flags defined here should represent
  * states from the controller.
  */
 enum {
        HCI_SETUP,
+       HCI_CONFIG,
        HCI_AUTO_OFF,
        HCI_RFKILLED,
        HCI_MGMT,
        HCI_PAIRABLE,
        HCI_SERVICE_CACHE,
-       HCI_DEBUG_KEYS,
-       HCI_DUT_MODE,
-       HCI_FORCE_SC,
-       HCI_FORCE_STATIC_ADDR,
+       HCI_KEEP_DEBUG_KEYS,
+       HCI_USE_DEBUG_KEYS,
        HCI_UNREGISTER,
+       HCI_UNCONFIGURED,
        HCI_USER_CHANNEL,
-
+       HCI_EXT_CONFIGURED,
+       HCI_LE_ADV,
        HCI_LE_SCAN,
        HCI_SSP_ENABLED,
        HCI_SC_ENABLED,
@@ -139,7 +193,6 @@ enum {
        HCI_PERIODIC_INQ,
        HCI_FAST_CONNECTABLE,
        HCI_BREDR_ENABLED,
-       HCI_6LOWPAN_ENABLED,
        HCI_LE_SCAN_INTERRUPTED,
 };
 
@@ -147,7 +200,7 @@ enum {
  * or the HCI device is closed.
  */
 #define HCI_PERSISTENT_MASK (BIT(HCI_LE_SCAN) | BIT(HCI_PERIODIC_INQ) | \
-                             BIT(HCI_FAST_CONNECTABLE))
+                             BIT(HCI_FAST_CONNECTABLE) | BIT(HCI_LE_ADV))
 
 /* HCI ioctl defines */
 #define HCIDEVUP       _IOW('H', 201, int)
@@ -185,6 +238,7 @@ enum {
 #define HCI_AUTO_OFF_TIMEOUT   msecs_to_jiffies(2000)  /* 2 seconds */
 #define HCI_POWER_OFF_TIMEOUT  msecs_to_jiffies(5000)  /* 5 seconds */
 #define HCI_LE_CONN_TIMEOUT    msecs_to_jiffies(20000) /* 20 seconds */
+#define HCI_LE_AUTOCONN_TIMEOUT        msecs_to_jiffies(2000)  /* 2 seconds */
 
 /* HCI data types */
 #define HCI_COMMAND_PKT                0x01
@@ -301,6 +355,10 @@ enum {
 #define LMP_HOST_LE_BREDR      0x04
 #define LMP_HOST_SC            0x08
 
+/* LE features */
+#define HCI_LE_CONN_PARAM_REQ_PROC     0x02
+#define HCI_LE_PING                    0x10
+
 /* Connection modes */
 #define HCI_CM_ACTIVE  0x0000
 #define HCI_CM_HOLD    0x0001
@@ -347,17 +405,9 @@ enum {
 #define HCI_LK_CHANGED_COMBINATION     0x06
 #define HCI_LK_UNAUTH_COMBINATION_P256 0x07
 #define HCI_LK_AUTH_COMBINATION_P256   0x08
-/* The spec doesn't define types for SMP keys, the _MASTER suffix is implied */
-#define HCI_SMP_STK                    0x80
-#define HCI_SMP_STK_SLAVE              0x81
-#define HCI_SMP_LTK                    0x82
-#define HCI_SMP_LTK_SLAVE              0x83
-
-/* Long Term Key types */
-#define HCI_LTK_UNAUTH                 0x00
-#define HCI_LTK_AUTH                   0x01
 
 /* ---- HCI Error Codes ---- */
+#define HCI_ERROR_UNKNOWN_CONN_ID      0x02
 #define HCI_ERROR_AUTH_FAILURE         0x05
 #define HCI_ERROR_MEMORY_EXCEEDED      0x07
 #define HCI_ERROR_CONNECTION_TIMEOUT   0x08
@@ -367,6 +417,7 @@ enum {
 #define HCI_ERROR_REMOTE_POWER_OFF     0x15
 #define HCI_ERROR_LOCAL_HOST_TERM      0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED  0x18
+#define HCI_ERROR_INVALID_LL_PARAMS    0x1E
 #define HCI_ERROR_ADVERTISING_TIMEOUT  0x3c
 
 /* Flow control modes */
@@ -536,6 +587,11 @@ struct hci_cp_read_remote_version {
        __le16   handle;
 } __packed;
 
+#define HCI_OP_READ_CLOCK_OFFSET       0x041f
+struct hci_cp_read_clock_offset {
+       __le16   handle;
+} __packed;
+
 #define HCI_OP_SETUP_SYNC_CONN         0x0428
 struct hci_cp_setup_sync_conn {
        __le16   handle;
@@ -1085,6 +1141,18 @@ struct hci_rp_read_rssi {
        __s8     rssi;
 } __packed;
 
+#define HCI_OP_READ_CLOCK              0x1407
+struct hci_cp_read_clock {
+       __le16   handle;
+       __u8     which;
+} __packed;
+struct hci_rp_read_clock {
+       __u8     status;
+       __le16   handle;
+       __le32   clock;
+       __le16   accuracy;
+} __packed;
+
 #define HCI_OP_READ_LOCAL_AMP_INFO     0x1409
 struct hci_rp_read_local_amp_info {
        __u8     status;
@@ -1291,6 +1359,23 @@ struct hci_rp_le_read_supported_states {
        __u8    le_states[8];
 } __packed;
 
+#define HCI_OP_LE_CONN_PARAM_REQ_REPLY 0x2020
+struct hci_cp_le_conn_param_req_reply {
+       __le16  handle;
+       __le16  interval_min;
+       __le16  interval_max;
+       __le16  latency;
+       __le16  timeout;
+       __le16  min_ce_len;
+       __le16  max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY     0x2021
+struct hci_cp_le_conn_param_req_neg_reply {
+       __le16  handle;
+       __u8    reason;
+} __packed;
+
 /* ---- HCI Events ---- */
 #define HCI_EV_INQUIRY_COMPLETE                0x01
 
@@ -1670,6 +1755,15 @@ struct hci_ev_le_conn_complete {
        __u8     clk_accurancy;
 } __packed;
 
+#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
+struct hci_ev_le_conn_update_complete {
+       __u8     status;
+       __le16   handle;
+       __le16   interval;
+       __le16   latency;
+       __le16   supervision_timeout;
+} __packed;
+
 #define HCI_EV_LE_LTK_REQ              0x05
 struct hci_ev_le_ltk_req {
        __le16  handle;
@@ -1677,6 +1771,15 @@ struct hci_ev_le_ltk_req {
        __le16  ediv;
 } __packed;
 
+#define HCI_EV_LE_REMOTE_CONN_PARAM_REQ        0x06
+struct hci_ev_le_remote_conn_param_req {
+       __le16 handle;
+       __le16 interval_min;
+       __le16 interval_max;
+       __le16 latency;
+       __le16 timeout;
+} __packed;
+
 /* Advertising report event types */
 #define LE_ADV_IND             0x00
 #define LE_ADV_DIRECT_IND      0x01
index b386bf17e6c2c10808c8306ad7cbb4bb42e4713d..e69c2b08c0c6194aa898ad820cfc6e746f61c9c0 100644 (file)
@@ -71,6 +71,7 @@ struct discovery_state {
        bdaddr_t                last_adv_addr;
        u8                      last_adv_addr_type;
        s8                      last_adv_rssi;
+       u32                     last_adv_flags;
        u8                      last_adv_data[HCI_MAX_AD_LENGTH];
        u8                      last_adv_data_len;
 };
@@ -170,6 +171,8 @@ struct hci_dev {
        __u8            bus;
        __u8            dev_type;
        bdaddr_t        bdaddr;
+       bdaddr_t        setup_addr;
+       bdaddr_t        public_addr;
        bdaddr_t        random_addr;
        bdaddr_t        static_addr;
        __u8            adv_addr_type;
@@ -203,10 +206,13 @@ struct hci_dev {
        __u16           le_scan_window;
        __u16           le_conn_min_interval;
        __u16           le_conn_max_interval;
+       __u16           le_conn_latency;
+       __u16           le_supv_timeout;
        __u16           discov_interleaved_timeout;
        __u16           conn_info_min_age;
        __u16           conn_info_max_age;
        __u8            ssp_debug_mode;
+       __u32           clock;
 
        __u16           devid_source;
        __u16           devid_vendor;
@@ -273,7 +279,7 @@ struct hci_dev {
 
        struct delayed_work     service_cache;
 
-       struct timer_list       cmd_timer;
+       struct delayed_work     cmd_timer;
 
        struct work_struct      rx_work;
        struct work_struct      cmd_work;
@@ -299,6 +305,7 @@ struct hci_dev {
 
        struct list_head        mgmt_pending;
        struct list_head        blacklist;
+       struct list_head        whitelist;
        struct list_head        uuids;
        struct list_head        link_keys;
        struct list_head        long_term_keys;
@@ -307,6 +314,7 @@ struct hci_dev {
        struct list_head        le_white_list;
        struct list_head        le_conn_params;
        struct list_head        pend_le_conns;
+       struct list_head        pend_le_reports;
 
        struct hci_dev_stats    stat;
 
@@ -318,6 +326,7 @@ struct hci_dev {
 
        struct rfkill           *rfkill;
 
+       unsigned long           dbg_flags;
        unsigned long           dev_flags;
 
        struct delayed_work     le_scan_disable;
@@ -339,6 +348,7 @@ struct hci_dev {
        int (*setup)(struct hci_dev *hdev);
        int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
+       int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
 
 #define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -366,7 +376,6 @@ struct hci_conn {
        __u8            features[HCI_MAX_PAGES][8];
        __u16           pkt_type;
        __u16           link_policy;
-       __u32           link_mode;
        __u8            key_type;
        __u8            auth_type;
        __u8            sec_level;
@@ -377,20 +386,26 @@ struct hci_conn {
        __u32           passkey_notify;
        __u8            passkey_entered;
        __u16           disc_timeout;
+       __u16           conn_timeout;
        __u16           setting;
        __u16           le_conn_min_interval;
        __u16           le_conn_max_interval;
+       __u16           le_conn_interval;
+       __u16           le_conn_latency;
+       __u16           le_supv_timeout;
        __s8            rssi;
        __s8            tx_power;
        __s8            max_tx_power;
        unsigned long   flags;
 
+       __u32           clock;
+       __u16           clock_accuracy;
+
        unsigned long   conn_info_timestamp;
 
        __u8            remote_cap;
        __u8            remote_auth;
        __u8            remote_id;
-       bool            flush_key;
 
        unsigned int    sent;
 
@@ -407,7 +422,6 @@ struct hci_conn {
        struct hci_dev  *hdev;
        void            *l2cap_data;
        void            *sco_data;
-       void            *smp_conn;
        struct amp_mgr  *amp_mgr;
 
        struct hci_conn *link;
@@ -428,15 +442,19 @@ struct hci_chan {
 
 struct hci_conn_params {
        struct list_head list;
+       struct list_head action;
 
        bdaddr_t addr;
        u8 addr_type;
 
        u16 conn_min_interval;
        u16 conn_max_interval;
+       u16 conn_latency;
+       u16 supervision_timeout;
 
        enum {
                HCI_AUTO_CONN_DISABLED,
+               HCI_AUTO_CONN_REPORT,
                HCI_AUTO_CONN_ALWAYS,
                HCI_AUTO_CONN_LINK_LOSS,
        } auto_connect;
@@ -501,8 +519,8 @@ struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       int state);
 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie);
-bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
-                             bool name_known, bool *ssp);
+u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+                            bool name_known);
 void hci_inquiry_cache_flush(struct hci_dev *hdev);
 
 /* ----- HCI Connections ----- */
@@ -520,7 +538,13 @@ enum {
        HCI_CONN_AES_CCM,
        HCI_CONN_POWER_SAVE,
        HCI_CONN_REMOTE_OOB,
-       HCI_CONN_6LOWPAN,
+       HCI_CONN_FLUSH_KEY,
+       HCI_CONN_MASTER,
+       HCI_CONN_ENCRYPT,
+       HCI_CONN_AUTH,
+       HCI_CONN_SECURE,
+       HCI_CONN_FIPS,
+       HCI_CONN_STK_ENCRYPT,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -681,7 +705,8 @@ void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
-                               u8 dst_type, u8 sec_level, u8 auth_type);
+                               u8 dst_type, u8 sec_level, u16 conn_timeout,
+                               bool master);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                 u8 sec_level, u8 auth_type);
 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -825,30 +850,25 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
 int hci_inquiry(void __user *arg);
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
-                                        bdaddr_t *bdaddr, u8 type);
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-
-struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
-                                         bdaddr_t *bdaddr, u8 type);
-void hci_white_list_clear(struct hci_dev *hdev);
-int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
+                                          bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
+void hci_bdaddr_list_clear(struct list_head *list);
 
 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type);
-int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
-                       u8 auto_connect, u16 conn_min_interval,
-                       u16 conn_max_interval);
+struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+                                           bdaddr_t *addr, u8 addr_type);
+int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+                       u8 auto_connect);
 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
-void hci_conn_params_clear(struct hci_dev *hdev);
+void hci_conn_params_clear_all(struct hci_dev *hdev);
+void hci_conn_params_clear_disabled(struct hci_dev *hdev);
 
-struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
-                                           bdaddr_t *addr, u8 addr_type);
-void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
-void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
-void hci_pend_le_conns_clear(struct hci_dev *hdev);
+struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+                                                 bdaddr_t *addr,
+                                                 u8 addr_type);
 
 void hci_update_background_scan(struct hci_dev *hdev);
 
@@ -856,8 +876,9 @@ void hci_uuids_clear(struct hci_dev *hdev);
 
 void hci_link_keys_clear(struct hci_dev *hdev);
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
-                    bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
+struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
+                                 bdaddr_t *bdaddr, u8 *val, u8 type,
+                                 u8 pin_len, bool *persistent);
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
                             bool master);
 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -1021,7 +1042,7 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return;
 
-       encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
+       encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
        l2cap_security_cfm(conn, status, encrypt);
 
        if (conn->security_cfm_cb)
@@ -1062,7 +1083,7 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return;
 
-       encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
+       encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
 
        read_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
@@ -1147,7 +1168,7 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
 
 static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
 {
-       if (addr_type != 0x01)
+       if (addr_type != ADDR_LE_DEV_RANDOM)
                return false;
 
        if ((bdaddr->b[5] & 0xc0) == 0x40)
@@ -1156,6 +1177,18 @@ static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
        return false;
 }
 
+static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
+{
+       if (addr_type == ADDR_LE_DEV_PUBLIC)
+               return true;
+
+       /* Check for Random Static address type */
+       if ((addr->b[5] & 0xc0) == 0xc0)
+               return true;
+
+       return false;
+}
+
 static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 addr_type)
 {
@@ -1165,6 +1198,27 @@ static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
        return hci_find_irk_by_rpa(hdev, bdaddr);
 }
 
+static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
+                                       u16 to_multiplier)
+{
+       u16 max_latency;
+
+       if (min > max || min < 6 || max > 3200)
+               return -EINVAL;
+
+       if (to_multiplier < 10 || to_multiplier > 3200)
+               return -EINVAL;
+
+       if (max >= to_multiplier * 8)
+               return -EINVAL;
+
+       max_latency = (to_multiplier * 8 / max) - 1;
+       if (latency > 499 || latency > max_latency)
+               return -EINVAL;
+
+       return 0;
+}
+
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
@@ -1227,6 +1281,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
 
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
+int mgmt_new_settings(struct hci_dev *hdev);
 void mgmt_index_added(struct hci_dev *hdev);
 void mgmt_index_removed(struct hci_dev *hdev);
 void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
@@ -1234,7 +1289,6 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered);
 void mgmt_discoverable_timeout(struct hci_dev *hdev);
 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
 void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
-void mgmt_advertising(struct hci_dev *hdev, u8 advertising);
 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
                       bool persistent);
@@ -1281,18 +1335,18 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
                                       u8 *randomizer192, u8 *hash256,
                                       u8 *randomizer256, u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
-                      u8 scan_rsp_len);
+                      u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
+                      u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
-int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
                   bool persistent);
+void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                        u8 bdaddr_type, u8 store_hint, u16 min_interval,
+                        u16 max_interval, u16 latency, u16 timeout);
 void mgmt_reenable_advertising(struct hci_dev *hdev);
 void mgmt_smp_complete(struct hci_conn *conn, bool complete);
 
@@ -1324,8 +1378,8 @@ struct hci_sec_filter {
 #define hci_req_lock(d)                mutex_lock(&d->req_lock)
 #define hci_req_unlock(d)      mutex_unlock(&d->req_lock)
 
-void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
-                                       u16 latency, u16 to_multiplier);
+u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
+                     u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                                                        __u8 ltk[16]);
 
index 4abdcb220e3ac7a0558a9a0115b873a5312f3f07..e0c6a9abdb6282e805f46556223085a191f7810d 100644 (file)
@@ -137,7 +137,6 @@ struct l2cap_conninfo {
 #define L2CAP_FC_L2CAP         0x02
 #define L2CAP_FC_CONNLESS      0x04
 #define L2CAP_FC_A2MP          0x08
-#define L2CAP_FC_6LOWPAN        0x3e /* reserved and temporary value */
 
 /* L2CAP Control Field bit masks */
 #define L2CAP_CTRL_SAR                 0xC000
@@ -579,7 +578,7 @@ struct l2cap_chan {
        struct list_head        global_l;
 
        void                    *data;
-       struct l2cap_ops        *ops;
+       const struct l2cap_ops  *ops;
        struct mutex            lock;
 };
 
@@ -600,7 +599,12 @@ struct l2cap_ops {
        void                    (*set_shutdown) (struct l2cap_chan *chan);
        long                    (*get_sndtimeo) (struct l2cap_chan *chan);
        struct sk_buff          *(*alloc_skb) (struct l2cap_chan *chan,
+                                              unsigned long hdr_len,
                                               unsigned long len, int nb);
+       int                     (*memcpy_fromiovec) (struct l2cap_chan *chan,
+                                                    unsigned char *kdata,
+                                                    struct iovec *iov,
+                                                    int len);
 };
 
 struct l2cap_conn {
@@ -856,6 +860,31 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
        return 0;
 }
 
+static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan,
+                                                unsigned char *kdata,
+                                                struct iovec *iov,
+                                                int len)
+{
+       /* Following is safe since for compiler definitions of kvec and
+        * iovec are identical, yielding the same in-core layout and alignment
+        */
+       struct kvec *vec = (struct kvec *)iov;
+
+       while (len > 0) {
+               if (vec->iov_len) {
+                       int copy = min_t(unsigned int, len, vec->iov_len);
+                       memcpy(kdata, vec->iov_base, copy);
+                       len -= copy;
+                       kdata += copy;
+                       vec->iov_base += copy;
+                       vec->iov_len -= copy;
+               }
+               vec++;
+       }
+
+       return 0;
+}
+
 extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
@@ -872,8 +901,7 @@ struct l2cap_chan *l2cap_chan_create(void);
 void l2cap_chan_close(struct l2cap_chan *chan, int reason);
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type);
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
-                                                               u32 priority);
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
 void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
 int l2cap_chan_check_security(struct l2cap_chan *chan);
 void l2cap_chan_set_defaults(struct l2cap_chan *chan);
index bcffc9ae0c89bec0bd3237d708e76d0911abfa36..623d5203c5926292d626ff5aa2d5311f3dfcb33b 100644 (file)
@@ -97,6 +97,7 @@ struct mgmt_rp_read_index_list {
 #define MGMT_SETTING_SECURE_CONN       0x00000800
 #define MGMT_SETTING_DEBUG_KEYS                0x00001000
 #define MGMT_SETTING_PRIVACY           0x00002000
+#define MGMT_SETTING_CONFIGURATION     0x00004000
 
 #define MGMT_OP_READ_INFO              0x0004
 #define MGMT_READ_INFO_SIZE            0
@@ -424,6 +425,76 @@ struct mgmt_rp_get_conn_info {
        __s8    max_tx_power;
 } __packed;
 
+#define MGMT_OP_GET_CLOCK_INFO         0x0032
+struct mgmt_cp_get_clock_info {
+       struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_GET_CLOCK_INFO_SIZE       MGMT_ADDR_INFO_SIZE
+struct mgmt_rp_get_clock_info {
+       struct mgmt_addr_info addr;
+       __le32  local_clock;
+       __le32  piconet_clock;
+       __le16  accuracy;
+} __packed;
+
+#define MGMT_OP_ADD_DEVICE             0x0033
+struct mgmt_cp_add_device {
+       struct mgmt_addr_info addr;
+       __u8    action;
+} __packed;
+#define MGMT_ADD_DEVICE_SIZE           (MGMT_ADDR_INFO_SIZE + 1)
+
+#define MGMT_OP_REMOVE_DEVICE          0x0034
+struct mgmt_cp_remove_device {
+       struct mgmt_addr_info addr;
+} __packed;
+#define MGMT_REMOVE_DEVICE_SIZE                MGMT_ADDR_INFO_SIZE
+
+struct mgmt_conn_param {
+       struct mgmt_addr_info addr;
+       __le16 min_interval;
+       __le16 max_interval;
+       __le16 latency;
+       __le16 timeout;
+} __packed;
+
+#define MGMT_OP_LOAD_CONN_PARAM                0x0035
+struct mgmt_cp_load_conn_param {
+       __le16 param_count;
+       struct mgmt_conn_param params[0];
+} __packed;
+#define MGMT_LOAD_CONN_PARAM_SIZE      2
+
+#define MGMT_OP_READ_UNCONF_INDEX_LIST 0x0036
+#define MGMT_READ_UNCONF_INDEX_LIST_SIZE 0
+struct mgmt_rp_read_unconf_index_list {
+       __le16  num_controllers;
+       __le16  index[0];
+} __packed;
+
+#define MGMT_OPTION_EXTERNAL_CONFIG    0x00000001
+#define MGMT_OPTION_PUBLIC_ADDRESS     0x00000002
+
+#define MGMT_OP_READ_CONFIG_INFO       0x0037
+#define MGMT_READ_CONFIG_INFO_SIZE     0
+struct mgmt_rp_read_config_info {
+       __le16  manufacturer;
+       __le32  supported_options;
+       __le32  missing_options;
+} __packed;
+
+#define MGMT_OP_SET_EXTERNAL_CONFIG    0x0038
+struct mgmt_cp_set_external_config {
+       __u8 config;
+} __packed;
+#define MGMT_SET_EXTERNAL_CONFIG_SIZE  1
+
+#define MGMT_OP_SET_PUBLIC_ADDRESS     0x0039
+struct mgmt_cp_set_public_address {
+       bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_PUBLIC_ADDRESS_SIZE   6
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16  opcode;
@@ -522,6 +593,7 @@ struct mgmt_ev_auth_failed {
 
 #define MGMT_DEV_FOUND_CONFIRM_NAME    0x01
 #define MGMT_DEV_FOUND_LEGACY_PAIRING  0x02
+#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04
 
 #define MGMT_EV_DEVICE_FOUND           0x0012
 struct mgmt_ev_device_found {
@@ -578,3 +650,30 @@ struct mgmt_ev_new_csrk {
        __u8 store_hint;
        struct mgmt_csrk_info key;
 } __packed;
+
+#define MGMT_EV_DEVICE_ADDED           0x001a
+struct mgmt_ev_device_added {
+       struct mgmt_addr_info addr;
+       __u8 action;
+} __packed;
+
+#define MGMT_EV_DEVICE_REMOVED         0x001b
+struct mgmt_ev_device_removed {
+       struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_EV_NEW_CONN_PARAM         0x001c
+struct mgmt_ev_new_conn_param {
+       struct mgmt_addr_info addr;
+       __u8 store_hint;
+       __le16 min_interval;
+       __le16 max_interval;
+       __le16 latency;
+       __le16 timeout;
+} __packed;
+
+#define MGMT_EV_UNCONF_INDEX_ADDED     0x001d
+
+#define MGMT_EV_UNCONF_INDEX_REMOVED   0x001e
+
+#define MGMT_EV_NEW_CONFIG_OPTIONS     0x001f
index e46c437944f73e66cb275f4adc345a1f869923c8..0a080c4de2754d0b9fc64ecf54e8dc4bcb477cf3 100644 (file)
@@ -2266,10 +2266,6 @@ struct cfg80211_qos_map {
  *
  * @get_antenna: Get current antenna configuration from device (tx_ant, rx_ant).
  *
- * @set_ringparam: Set tx and rx ring sizes.
- *
- * @get_ringparam: Get tx and rx ring current and maximum sizes.
- *
  * @tdls_mgmt: Transmit a TDLS management frame.
  * @tdls_oper: Perform a high-level TDLS operation (e.g. TDLS link setup).
  *
@@ -2278,16 +2274,6 @@ struct cfg80211_qos_map {
  *
  * @set_noack_map: Set the NoAck Map for the TIDs.
  *
- * @get_et_sset_count:  Ethtool API to get string-set count.
- *     See @ethtool_ops.get_sset_count
- *
- * @get_et_stats:  Ethtool API to get a set of u64 stats.
- *     See @ethtool_ops.get_ethtool_stats
- *
- * @get_et_strings:  Ethtool API to get a set of strings to describe stats
- *     and perhaps other supported types of ethtool data-sets.
- *     See @ethtool_ops.get_strings
- *
  * @get_channel: Get the current operating channel for the virtual interface.
  *     For monitor interfaces, it should return %NULL unless there's a single
  *     current monitoring channel.
@@ -2315,7 +2301,12 @@ struct cfg80211_qos_map {
  *     reliability. This operation can not fail.
  * @set_coalesce: Set coalesce parameters.
  *
- * @channel_switch: initiate channel-switch procedure (with CSA)
+ * @channel_switch: initiate channel-switch procedure (with CSA). Driver is
+ *     responsible for verifying if the switch is possible. Since this is
+ *     inherently tricky driver may decide to disconnect an interface later
+ *     with cfg80211_stop_iface(). This doesn't mean driver can accept
+ *     everything. It should do its best to verify requests and reject them
+ *     as soon as possible.
  *
  * @set_qos_map: Set QoS mapping information to the driver
  *
@@ -2503,10 +2494,6 @@ struct cfg80211_ops {
        int     (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
        int     (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant);
 
-       int     (*set_ringparam)(struct wiphy *wiphy, u32 tx, u32 rx);
-       void    (*get_ringparam)(struct wiphy *wiphy,
-                                u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max);
-
        int     (*sched_scan_start)(struct wiphy *wiphy,
                                struct net_device *dev,
                                struct cfg80211_sched_scan_request *request);
@@ -2518,7 +2505,7 @@ struct cfg80211_ops {
        int     (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
                             const u8 *peer, u8 action_code,  u8 dialog_token,
                             u16 status_code, u32 peer_capability,
-                            const u8 *buf, size_t len);
+                            bool initiator, const u8 *buf, size_t len);
        int     (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
                             const u8 *peer, enum nl80211_tdls_operation oper);
 
@@ -2529,13 +2516,6 @@ struct cfg80211_ops {
                                  struct net_device *dev,
                                  u16 noack_map);
 
-       int     (*get_et_sset_count)(struct wiphy *wiphy,
-                                    struct net_device *dev, int sset);
-       void    (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev,
-                               struct ethtool_stats *stats, u64 *data);
-       void    (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev,
-                                 u32 sset, u8 *data);
-
        int     (*get_channel)(struct wiphy *wiphy,
                               struct wireless_dev *wdev,
                               struct cfg80211_chan_def *chandef);
@@ -4843,6 +4823,10 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
  */
 void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy);
 
+
+/* ethtool helper */
+void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
index a975edf21b22c3678194972db693645e48dd46e6..597b88a94332150c2da3f163daccbc012ce1b9cd 100644 (file)
@@ -81,8 +81,8 @@ struct dcbnl_rtnl_ops {
        void (*setbcncfg)(struct net_device *, int, u32);
        void (*getbcnrp)(struct net_device *, int, u8 *);
        void (*setbcnrp)(struct net_device *, int, u8);
-       u8   (*setapp)(struct net_device *, u8, u16, u8);
-       u8   (*getapp)(struct net_device *, u8, u16);
+       int  (*setapp)(struct net_device *, u8, u16, u8);
+       int  (*getapp)(struct net_device *, u8, u16);
        u8   (*getfeatcfg)(struct net_device *, int, u8 *);
        u8   (*setfeatcfg)(struct net_device *, int, u8);
 
index 7e64bd8bbda941319b494bcb7b66e79b7f52370a..6667a054763adfad3407d59d0f6810fb13f56a14 100644 (file)
@@ -1,6 +1,19 @@
 #ifndef _NET_FLOW_KEYS_H
 #define _NET_FLOW_KEYS_H
 
+/* struct flow_keys:
+ *     @src: source ip address in case of IPv4
+ *           For IPv6 it contains 32bit hash of src address
+ *     @dst: destination ip address in case of IPv4
+ *           For IPv6 it contains 32bit hash of dst address
+ *     @ports: port numbers of Transport header
+ *             port16[0]: src port number
+ *             port16[1]: dst port number
+ *     @thoff: Transport header offset
+ *     @n_proto: Network header protocol (eg. IPv4/IPv6)
+ *     @ip_proto: Transport header protocol (eg. TCP/UDP)
+ * All the members, except thoff, are in network byte order.
+ */
 struct flow_keys {
        /* (src,dst) must be grouped, in the same way than in IP header */
        __be32 src;
@@ -10,9 +23,11 @@ struct flow_keys {
                __be16 port16[2];
        };
        u16 thoff;
+       u16 n_proto;
        u8 ip_proto;
 };
 
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
 __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+u32 flow_hash_from_keys(struct flow_keys *keys);
 #endif
index b4956a5fcc3f117e9e4199a5ad7af94894fea859..d07b1a64b4e721958b9234aab1a94ab3ad40d63d 100644 (file)
@@ -205,6 +205,7 @@ struct inet6_dev {
        struct timer_list       rs_timer;
        __u8                    rs_probes;
 
+       __u8                    addr_gen_mode;
        unsigned long           tstamp; /* ipv6InterfaceTable update timestamp */
        struct rcu_head         rcu;
 };
index b1edf17bec01130f9751747c4d092e5de50aaeac..a829b77523cf3f28704dbc593ebf112802886951 100644 (file)
@@ -88,8 +88,10 @@ struct inet_request_sock {
                                acked      : 1,
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
-       struct ip_options_rcu   *opt;
-       struct sk_buff          *pktopts;
+       union {
+               struct ip_options_rcu   *opt;
+               struct sk_buff          *pktopts;
+       };
        u32                     ir_mark;
 };
 
index 61474ea02152dc0886901044b4fd210ee5d9596d..6c566034e26d9bed72bfbcce39a308b9e315d2bb 100644 (file)
@@ -108,6 +108,7 @@ struct inet_timewait_sock {
 #define tw_family              __tw_common.skc_family
 #define tw_state               __tw_common.skc_state
 #define tw_reuse               __tw_common.skc_reuse
+#define tw_ipv6only            __tw_common.skc_ipv6only
 #define tw_bound_dev_if                __tw_common.skc_bound_dev_if
 #define tw_node                        __tw_common.skc_nulls_node
 #define tw_bind_node           __tw_common.skc_bind_node
@@ -131,7 +132,7 @@ struct inet_timewait_sock {
        __be16                  tw_sport;
        kmemcheck_bitfield_begin(flags);
        /* And these are ours. */
-       unsigned int            tw_ipv6only     : 1,
+       unsigned int            tw_pad0         : 1,    /* 1 bit hole */
                                tw_transparent  : 1,
                                tw_flowlabel    : 20,
                                tw_pad          : 2,    /* 2 bits hole */
index 0e795df05ec983d07d7acc52cddabd728ea6e0b1..2e8f055989c3c982df11c0160ae409e98814bcab 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 
 struct sock;
 
@@ -353,6 +354,19 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
                                  skb->len, proto, 0);
 }
 
+static inline void inet_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = inet->inet_saddr;
+       keys.dst = inet->inet_daddr;
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
 /*
  *     Map a multicast IP onto multicast MAC for type ethernet.
  */
index 574337fe72ddbd3038c061dab5aef142ea0f068b..a25017247457cbf84510ffee6190052ae650c765 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/if_inet6.h>
 #include <net/ndisc.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 #include <net/snmp.h>
 
 #define SIN6_LEN_RFC2133       24
@@ -557,24 +558,29 @@ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-       const unsigned long *ul = (const unsigned long *)a;
+       const __be64 *be = (const __be64 *)a;
 
-       return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL;
+       return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
 #else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
-               a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
+               a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
 #endif
 }
 
+/*
+ * Note that we must __force cast these to unsigned long to make sparse happy,
+ * since all of the endian-annotated types are fixed size regardless of arch.
+ */
 static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 {
        return (
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-               *(__be64 *)a |
+               *(unsigned long *)a |
 #else
-               (a->s6_addr32[0] | a->s6_addr32[1]) |
+               (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
 #endif
-               (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
+               (__force unsigned long)(a->s6_addr32[2] ^
+                                       cpu_to_be32(0x0000ffff))) == 0UL;
 }
 
 /*
@@ -684,6 +690,50 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
        return hlimit;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
+       keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+                                       __be32 flowlabel, bool autolabel)
+{
+       if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
+               __be32 hash;
+
+               hash = skb_get_hash(skb);
+
+               /* Since this is being sent on the wire obfuscate hash a bit
+                * to minimize possibility that any useful information to an
+                * attacker is leaked. Only lower 20 bits are relevant.
+                */
+               hash ^= hash >> 12;
+
+               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+       }
+
+       return flowlabel;
+}
+#else
+static inline void ip6_set_txhash(struct sock *sk) { }
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+                                       __be32 flowlabel, bool autolabel)
+{
+       return flowlabel;
+}
+#endif
+
+
 /*
  *     Header manipulation
  */
index 421b6ecb4b2cdee892379212f9d252f3f3ecffbc..9ce5cb17ed826a83979d787b9a43e0f19d14f383 100644 (file)
@@ -754,20 +754,25 @@ struct ieee80211_tx_info {
 };
 
 /**
- * struct ieee80211_sched_scan_ies - scheduled scan IEs
+ * struct ieee80211_scan_ies - descriptors for different blocks of IEs
  *
- * This structure is used to pass the appropriate IEs to be used in scheduled
- * scans for all bands.  It contains both the IEs passed from the userspace
+ * This structure is used to point to different blocks of IEs in HW scan
+ * and scheduled scan. These blocks contain the IEs passed by userspace
  * and the ones generated by mac80211.
  *
- * @ie: array with the IEs for each supported band
- * @len: array with the total length of the IEs for each band
+ * @ies: pointers to band specific IEs.
+ * @len: lengths of band-specific IEs.
+ * @common_ies: IEs for all bands (especially vendor specific ones)
+ * @common_ie_len: length of the common_ies
  */
-struct ieee80211_sched_scan_ies {
-       u8 *ie[IEEE80211_NUM_BANDS];
+struct ieee80211_scan_ies {
+       const u8 *ies[IEEE80211_NUM_BANDS];
        size_t len[IEEE80211_NUM_BANDS];
+       const u8 *common_ies;
+       size_t common_ie_len;
 };
 
+
 static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
 {
        return (struct ieee80211_tx_info *)skb->cb;
@@ -1601,11 +1606,8 @@ struct ieee80211_tx_control {
  *     is not enabled the default action is to disconnect when getting the
  *     CSA frame.
  *
- * @IEEE80211_HW_CHANGE_RUNNING_CHANCTX: The hardware can change a
- *     channel context on-the-fly.  This is needed for channel switch
- *     on single-channel hardware.  It can also be used as an
- *     optimization in certain channel switch cases with
- *     multi-channel.
+ * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
+ *     in one command, mac80211 doesn't have to run separate scans per band.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1637,7 +1639,8 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_TIMING_BEACON_ONLY                 = 1<<26,
        IEEE80211_HW_SUPPORTS_HT_CCK_RATES              = 1<<27,
        IEEE80211_HW_CHANCTX_STA_CSA                    = 1<<28,
-       IEEE80211_HW_CHANGE_RUNNING_CHANCTX             = 1<<29,
+       /* bit 29 unused */
+       IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS           = 1<<30,
 };
 
 /**
@@ -1763,6 +1766,19 @@ struct ieee80211_hw {
        const struct ieee80211_cipher_scheme *cipher_schemes;
 };
 
+/**
+ * struct ieee80211_scan_request - hw scan request
+ *
+ * @ies: pointers different parts of IEs (in req.ie)
+ * @req: cfg80211 request.
+ */
+struct ieee80211_scan_request {
+       struct ieee80211_scan_ies ies;
+
+       /* Keep last */
+       struct cfg80211_scan_request req;
+};
+
 /**
  * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy
  *
@@ -2764,6 +2780,15 @@ enum ieee80211_roc_type {
  *     mac80211 will transmit the frame right away.
  *     The callback is optional and can (should!) sleep.
  *
+ * @mgd_protect_tdls_discover: Protect a TDLS discovery session. After sending
+ *     a TDLS discovery-request, we expect a reply to arrive on the AP's
+ *     channel. We must stay on the channel (no PSM, scan, etc.), since a TDLS
+ *     setup-response is a direct packet not buffered by the AP.
+ *     mac80211 will call this function just before the transmission of a TDLS
+ *     discovery-request. The recommended period of protection is at least
+ *     2 * (DTIM period).
+ *     The callback is optional and can sleep.
+ *
  * @add_chanctx: Notifies device driver about new channel context creation.
  * @remove_chanctx: Notifies device driver about channel context destruction.
  * @change_chanctx: Notifies device driver about channel context changes that
@@ -2865,13 +2890,13 @@ struct ieee80211_ops {
        void (*set_default_unicast_key)(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif, int idx);
        int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                      struct cfg80211_scan_request *req);
+                      struct ieee80211_scan_request *req);
        void (*cancel_hw_scan)(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif);
        int (*sched_scan_start)(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                struct cfg80211_sched_scan_request *req,
-                               struct ieee80211_sched_scan_ies *ies);
+                               struct ieee80211_scan_ies *ies);
        int (*sched_scan_stop)(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif);
        void (*sw_scan_start)(struct ieee80211_hw *hw);
@@ -2981,6 +3006,9 @@ struct ieee80211_ops {
        void    (*mgd_prepare_tx)(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif);
 
+       void    (*mgd_protect_tdls_discover)(struct ieee80211_hw *hw,
+                                            struct ieee80211_vif *vif);
+
        int (*add_chanctx)(struct ieee80211_hw *hw,
                           struct ieee80211_chanctx_conf *ctx);
        void (*remove_chanctx)(struct ieee80211_hw *hw,
@@ -4815,4 +4843,17 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr,
  */
 void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf);
 
+/**
+ * ieee80211_tdls_oper - request userspace to perform a TDLS operation
+ * @vif: virtual interface
+ * @peer: the peer's destination address
+ * @oper: the requested TDLS operation
+ * @reason_code: reason code for the operation, valid for TDLS teardown
+ * @gfp: allocation flags
+ *
+ * See cfg80211_tdls_oper_request().
+ */
+void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
+                                enum nl80211_tdls_operation oper,
+                                u16 reason_code, gfp_t gfp);
 #endif /* MAC80211_H */
index a591053cae6305c399eabb745c6f1764c41bc8be..2e67cdd19cdc6d34c322056c52dc72fedf2c416b 100644 (file)
@@ -80,6 +80,25 @@ struct ieee802154_dev {
 #define        IEEE802154_HW_OMIT_CKSUM        0x00000001
 /* Indicates that receiver will autorespond with ACK frames. */
 #define        IEEE802154_HW_AACK              0x00000002
+/* Indicates that transceiver will support transmit power setting. */
+#define        IEEE802154_HW_TXPOWER           0x00000004
+/* Indicates that transceiver will support listen before transmit. */
+#define        IEEE802154_HW_LBT               0x00000008
+/* Indicates that transceiver will support cca mode setting. */
+#define        IEEE802154_HW_CCA_MODE          0x00000010
+/* Indicates that transceiver will support cca ed level setting. */
+#define        IEEE802154_HW_CCA_ED_LEVEL      0x00000020
+/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
+ * settings. */
+#define        IEEE802154_HW_CSMA_PARAMS       0x00000040
+/* Indicates that transceiver will support ARET frame retries setting. */
+#define        IEEE802154_HW_FRAME_RETRIES     0x00000080
+
+/* This groups the most common CSMA support fields into one. */
+#define IEEE802154_HW_CSMA             (IEEE802154_HW_CCA_MODE | \
+                                        IEEE802154_HW_CCA_ED_LEVEL | \
+                                        IEEE802154_HW_CSMA_PARAMS | \
+                                        IEEE802154_HW_FRAME_RETRIES)
 
 /* struct ieee802154_ops - callbacks from mac802154 to the driver
  *
index 0e3d08e4b1d3e59fa101607d88e4c66f7664566d..57c880378443d42d1e568f717922434afeb00974 100644 (file)
@@ -18,7 +18,6 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 portid;             /* netlink portid of destroyer */
-       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
@@ -216,8 +215,23 @@ void nf_conntrack_ecache_pernet_fini(struct net *net);
 
 int nf_conntrack_ecache_init(void);
 void nf_conntrack_ecache_fini(void);
-#else /* CONFIG_NF_CONNTRACK_EVENTS */
 
+static inline void nf_conntrack_ecache_delayed_work(struct net *net)
+{
+       if (!delayed_work_pending(&net->ct.ecache_dwork)) {
+               schedule_delayed_work(&net->ct.ecache_dwork, HZ);
+               net->ct.ecache_dwork_pending = true;
+       }
+}
+
+static inline void nf_conntrack_ecache_work(struct net *net)
+{
+       if (net->ct.ecache_dwork_pending) {
+               net->ct.ecache_dwork_pending = false;
+               mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
+       }
+}
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
                                            struct nf_conn *ct) {}
 static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
@@ -255,6 +269,14 @@ static inline int nf_conntrack_ecache_init(void)
 static inline void nf_conntrack_ecache_fini(void)
 {
 }
+
+static inline void nf_conntrack_ecache_delayed_work(struct net *net)
+{
+}
+
+static inline void nf_conntrack_ecache_work(struct net *net)
+{
+}
 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
 
 #endif /*_NF_CONNTRACK_ECACHE_H*/
index 99eac12d040ba375e0b1d8712d9a4b3005da5ee5..534e1f2ac4fc36d5baeb7b3115dc95aaebd065d9 100644 (file)
 #define NF_LOG_UID             0x08    /* Log UID owning local socket */
 #define NF_LOG_MASK            0x0f
 
-#define NF_LOG_TYPE_LOG                0x01
-#define NF_LOG_TYPE_ULOG       0x02
+enum nf_log_type {
+       NF_LOG_TYPE_LOG         = 0,
+       NF_LOG_TYPE_ULOG,
+       NF_LOG_TYPE_MAX
+};
 
 struct nf_loginfo {
        u_int8_t type;
@@ -40,10 +43,10 @@ typedef void nf_logfn(struct net *net,
                      const char *prefix);
 
 struct nf_logger {
-       struct module   *me;
-       nf_logfn        *logfn;
-       char            *name;
-       struct list_head        list[NFPROTO_NUMPROTO];
+       char                    *name;
+       enum nf_log_type        type;
+       nf_logfn                *logfn;
+       struct module           *me;
 };
 
 /* Function to register/unregister log function. */
@@ -58,6 +61,13 @@ int nf_log_bind_pf(struct net *net, u_int8_t pf,
                   const struct nf_logger *logger);
 void nf_log_unbind_pf(struct net *net, u_int8_t pf);
 
+int nf_logger_find_get(int pf, enum nf_log_type type);
+void nf_logger_put(int pf, enum nf_log_type type);
+void nf_logger_request_module(int pf, enum nf_log_type type);
+
+#define MODULE_ALIAS_NF_LOGGER(family, type) \
+       MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
+
 /* Calls the registered backend logging function */
 __printf(8, 9)
 void nf_log_packet(struct net *net,
@@ -69,4 +79,24 @@ void nf_log_packet(struct net *net,
                   const struct nf_loginfo *li,
                   const char *fmt, ...);
 
+struct nf_log_buf;
+
+struct nf_log_buf *nf_log_buf_open(void);
+__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
+void nf_log_buf_close(struct nf_log_buf *m);
+
+/* common logging functions */
+int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset);
+int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset,
+                          unsigned int logflags);
+void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+                              unsigned int hooknum, const struct sk_buff *skb,
+                              const struct net_device *in,
+                              const struct net_device *out,
+                              const struct nf_loginfo *loginfo,
+                              const char *prefix);
+
 #endif /* _NF_LOG_H */
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
deleted file mode 100644 (file)
index 9d9756c..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
-
-struct sbuff {
-       unsigned int    count;
-       char            buf[S_SIZE + 1];
-};
-static struct sbuff emergency, *emergency_ptr = &emergency;
-
-static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
-{
-       va_list args;
-       int len;
-
-       if (likely(m->count < S_SIZE)) {
-               va_start(args, f);
-               len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
-               va_end(args);
-               if (likely(m->count + len < S_SIZE)) {
-                       m->count += len;
-                       return 0;
-               }
-       }
-       m->count = S_SIZE;
-       printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
-       return -1;
-}
-
-static struct sbuff *sb_open(void)
-{
-       struct sbuff *m = kmalloc(sizeof(*m), GFP_ATOMIC);
-
-       if (unlikely(!m)) {
-               local_bh_disable();
-               do {
-                       m = xchg(&emergency_ptr, NULL);
-               } while (!m);
-       }
-       m->count = 0;
-       return m;
-}
-
-static void sb_close(struct sbuff *m)
-{
-       m->buf[m->count] = 0;
-       printk("%s\n", m->buf);
-
-       if (likely(m != &emergency))
-               kfree(m);
-       else {
-               emergency_ptr = m;
-               local_bh_enable();
-       }
-}
-
index 773cce308bc61ce312c4a5ecab0464ba84e1ce41..29d6a94db54d6136b6b380d5817341d2bbe83bce 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/list.h>
 #include <linux/list_nulls.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/seqlock.h>
 
@@ -73,6 +74,10 @@ struct ct_pcpu {
 struct netns_ct {
        atomic_t                count;
        unsigned int            expect_count;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       struct delayed_work ecache_dwork;
+       bool ecache_dwork_pending;
+#endif
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
@@ -82,7 +87,6 @@ struct netns_ct {
 #endif
        char                    *slabname;
        unsigned int            sysctl_log_invalid; /* Log invalid packets */
-       unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_events;
        int                     sysctl_acct;
        int                     sysctl_auto_assign_helper;
index 19d3446e59d2555639e9553b1958d7354792af1e..eade27adecf3678ed2d1568adf34badd38ed88f9 100644 (file)
@@ -28,6 +28,7 @@ struct netns_sysctl_ipv6 {
        int ip6_rt_mtu_expires;
        int ip6_rt_min_advmss;
        int flowlabel_consistency;
+       int auto_flowlabels;
        int icmpv6_time;
        int anycast_src_echo_reply;
        int fwmark_reflect;
index 72240e5ac2c4b2223e9e1706c81974f11d7eafc6..e21b9f9653c011fe11634e2e5e745d4e5d7a9bf9 100644 (file)
@@ -136,6 +136,7 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
 struct net_device *rtnl_create_link(struct net *net, char *ifname,
+                                   unsigned char name_assign_type,
                                    const struct rtnl_link_ops *ops,
                                    struct nlattr *tb[]);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
index 624f9857c83e3d7f2987ef95ecc410ad6f8c744f..a3cfb8ebeb53e2f233ea894fbe7e5a3835255c48 100644 (file)
@@ -231,7 +231,7 @@ struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
-       unsigned char           data[20];
+       unsigned char           data[24];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
index 4b7cd695e43187c39eeae537bdc031d44ba63abc..f22538e68245082a8a8663260007bcae58a50be2 100644 (file)
@@ -118,6 +118,7 @@ typedef enum {
 #define SCTP_MAX_NUM_COMMANDS 14
 
 typedef union {
+       void *zero_all; /* Set to NULL to clear the entire union */
        __s32 i32;
        __u32 u32;
        __be32 be32;
@@ -154,7 +155,7 @@ typedef union {
 static inline sctp_arg_t       \
 SCTP_## name (type arg)                \
 { sctp_arg_t retval;\
-  memset(&retval, 0, sizeof(sctp_arg_t));\
+  retval.zero_all = NULL;\
   retval.elt = arg;\
   return retval;\
 }
@@ -191,7 +192,7 @@ static inline sctp_arg_t SCTP_NOFORCE(void)
 static inline sctp_arg_t SCTP_NULL(void)
 {
        sctp_arg_t retval;
-       memset(&retval, 0, sizeof(sctp_arg_t));
+       retval.zero_all = NULL;
        return retval;
 }
 
@@ -202,27 +203,49 @@ typedef struct {
 
 typedef struct {
        sctp_cmd_t cmds[SCTP_MAX_NUM_COMMANDS];
-       __u8 next_free_slot;
-       __u8 next_cmd;
+       sctp_cmd_t *last_used_slot;
+       sctp_cmd_t *next_cmd;
 } sctp_cmd_seq_t;
 
 
 /* Initialize a block of memory as a command sequence.
  * Return 0 if the initialization fails.
  */
-int sctp_init_cmd_seq(sctp_cmd_seq_t *seq);
+static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
+{
+       /* cmds[] is filled backwards to simplify the overflow BUG() check */
+       seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS;
+       seq->next_cmd = seq->last_used_slot;
+       return 1;               /* We always succeed.  */
+}
+
 
 /* Add a command to an sctp_cmd_seq_t.
  *
  * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
  * to wrap data which goes in the obj argument.
  */
-void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj);
+static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb,
+                                  sctp_arg_t obj)
+{
+       sctp_cmd_t *cmd = seq->last_used_slot - 1;
+
+       BUG_ON(cmd < seq->cmds);
+
+       cmd->verb = verb;
+       cmd->obj = obj;
+       seq->last_used_slot = cmd;
+}
 
 /* Return the next command structure in an sctp_cmd_seq.
  * Return NULL at the end of the sequence.
  */
-sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq);
+static inline sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
+{
+       if (seq->next_cmd <= seq->last_used_slot)
+               return NULL;
 
-#endif /* __net_sctp_command_h__ */
+       return --seq->next_cmd;
+}
 
+#endif /* __net_sctp_command_h__ */
index 8e4de46c052ec7d78188f203df2b69b0a0b423ab..90c1cccd164d11bb8a42fc03e313d711cc67e882 100644 (file)
@@ -109,6 +109,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
 /*
  * sctp/primitive.c
@@ -388,27 +389,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
        return (head->next != head) && (head->next == head->prev);
 }
 
-/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
-static inline __s32 sctp_jitter(__u32 rto)
-{
-       static __u32 sctp_rand;
-       __s32 ret;
-
-       /* Avoid divide by zero. */
-       if (!rto)
-               rto = 1;
-
-       sctp_rand += jiffies;
-       sctp_rand ^= (sctp_rand << 12);
-       sctp_rand ^= (sctp_rand >> 20);
-
-       /* Choose random number from 0 to rto, then move to -50% ~ +50%
-        * of rto.
-        */
-       ret = sctp_rand % rto - (rto >> 1);
-       return ret;
-}
-
 /* Break down data chunks at this point.  */
 static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 {
index f38588bf3462d9e2374258bb6c383e38413507ed..7741d1b66967a3f198bb33017d22103454de5a94 100644 (file)
@@ -207,7 +207,9 @@ struct sctp_sock {
        struct sctp_paddrparams paddrparam;
        struct sctp_event_subscribe subscribe;
        struct sctp_assocparams assocparams;
+
        int user_frag;
+
        __u32 autoclose;
        __u8 nodelay;
        __u8 disable_fragments;
@@ -215,6 +217,8 @@ struct sctp_sock {
        __u8 frag_interleave;
        __u32 adaptation_ind;
        __u32 pd_point;
+       __u8 recvrcvinfo;
+       __u8 recvnxtinfo;
 
        atomic_t pd_mode;
        /* Receive to here while partial delivery is in effect. */
@@ -1919,7 +1923,8 @@ struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
 /* A convenience structure to parse out SCTP specific CMSGs. */
 typedef struct sctp_cmsgs {
        struct sctp_initmsg *init;
-       struct sctp_sndrcvinfo *info;
+       struct sctp_sndrcvinfo *srinfo;
+       struct sctp_sndinfo *sinfo;
 } sctp_cmsgs_t;
 
 /* Structure for tracking memory objects */
index daacb32b55b576359443cb57d184a8c6639d4956..cccdcfd149736b315554d64c2a556e0ad6496fc8 100644 (file)
@@ -129,7 +129,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
        const struct sctp_association *asoc, gfp_t gfp);
 
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
-       struct msghdr *);
+                                  struct msghdr *);
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *);
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *, struct sock *sk);
+
 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
 
 /* Is this event type enabled? */
@@ -155,10 +160,3 @@ static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
 }
 
 #endif /* __sctp_ulpevent_h__ */
-
-
-
-
-
-
-
index 1563507457002532edd3ad0dfd7419c3a8cde42a..28f734601b506e57e5985f47005e0ee2e22b42f9 100644 (file)
@@ -181,7 +181,8 @@ struct sock_common {
        unsigned short          skc_family;
        volatile unsigned char  skc_state;
        unsigned char           skc_reuse:4;
-       unsigned char           skc_reuseport:4;
+       unsigned char           skc_reuseport:1;
+       unsigned char           skc_ipv6only:1;
        int                     skc_bound_dev_if;
        union {
                struct hlist_node       skc_bind_node;
@@ -272,6 +273,7 @@ struct cg_proto;
   *    @sk_rcvtimeo: %SO_RCVTIMEO setting
   *    @sk_sndtimeo: %SO_SNDTIMEO setting
   *    @sk_rxhash: flow hash received from netif layer
+  *    @sk_txhash: computed flow hash for use on transmit
   *    @sk_filter: socket filtering instructions
   *    @sk_protinfo: private area, net family specific, when not using slab
   *    @sk_timer: sock cleanup timer
@@ -317,6 +319,7 @@ struct sock {
 #define sk_state               __sk_common.skc_state
 #define sk_reuse               __sk_common.skc_reuse
 #define sk_reuseport           __sk_common.skc_reuseport
+#define sk_ipv6only            __sk_common.skc_ipv6only
 #define sk_bound_dev_if                __sk_common.skc_bound_dev_if
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
@@ -345,6 +348,7 @@ struct sock {
 #ifdef CONFIG_RPS
        __u32                   sk_rxhash;
 #endif
+       __u32                   sk_txhash;
 #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
@@ -656,6 +660,20 @@ static inline void sk_add_bind_node(struct sock *sk,
 #define sk_for_each_bound(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_bind_node)
 
+/**
+ * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @head:      the head for your list.
+ * @offset:    offset of hlist_node within the struct.
+ *
+ */
+#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset)                       \
+       for (pos = (head)->first;                                              \
+            (!is_a_nulls(pos)) &&                                             \
+               ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
+            pos = pos->next)
+
 static inline struct user_namespace *sk_user_ns(struct sock *sk)
 {
        /* Careful only use this in a context where these parameters
@@ -1978,6 +1996,14 @@ static inline void sock_poll_wait(struct file *filp,
        }
 }
 
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk->sk_txhash) {
+               skb->l4_hash = 1;
+               skb->hash = sk->sk_txhash;
+       }
+}
+
 /*
  *     Queue a received datagram if it will fit. Stream and sequenced
  *     protocols can't normally use this as they need to fit buffers in
@@ -1992,6 +2018,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = sock_wfree;
+       skb_set_hash_from_sk(skb, sk);
        /*
         * We used to take a refcount on sk, but following operation
         * is enough to guarantee sk_free() wont free this sock until
index 7286db80e8b8b6532cb4148a283f2f6d8eb3b530..0aeb2eb749dc9c118b3465e6adc1bb7bc8accfee 100644 (file)
@@ -493,14 +493,8 @@ static inline u32 tcp_cookie_time(void)
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
-#else
-static inline __u32 cookie_v4_init_sequence(struct sock *sk,
-                                           struct sk_buff *skb,
-                                           __u16 *mss)
-{
-       return 0;
-}
+__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mss);
 #endif
 
 __u32 cookie_init_timestamp(struct request_sock *req);
@@ -516,13 +510,6 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
                              __u16 *mss);
-#else
-static inline __u32 cookie_v6_init_sequence(struct sock *sk,
-                                           struct sk_buff *skb,
-                                           __u16 *mss)
-{
-       return 0;
-}
 #endif
 /* tcp_output.c */
 
@@ -941,7 +928,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+void tcp_enter_cwr(struct sock *sk);
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
@@ -1098,7 +1085,7 @@ static inline int tcp_full_space(const struct sock *sk)
 
 static inline void tcp_openreq_init(struct request_sock *req,
                                    struct tcp_options_received *rx_opt,
-                                   struct sk_buff *skb)
+                                   struct sk_buff *skb, struct sock *sk)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -1106,7 +1093,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-       tcp_rsk(req)->snt_synack = 0;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1117,6 +1104,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->ecn_ok = 0;
        ireq->ir_rmt_port = tcp_hdr(skb)->source;
        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+       ireq->ir_mark = inet_request_mark(sk, skb);
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1585,6 +1573,11 @@ int tcp4_proc_init(void);
 void tcp4_proc_exit(void);
 #endif
 
+int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                    const struct tcp_request_sock_ops *af_ops,
+                    struct sock *sk, struct sk_buff *skb);
+
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
@@ -1602,6 +1595,7 @@ struct tcp_sock_af_ops {
 };
 
 struct tcp_request_sock_ops {
+       u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
                                                struct request_sock *req);
@@ -1611,8 +1605,39 @@ struct tcp_request_sock_ops {
                                                  const struct request_sock *req,
                                                  const struct sk_buff *skb);
 #endif
+       void (*init_req)(struct request_sock *req, struct sock *sk,
+                        struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
+       __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+                                __u16 *mss);
+#endif
+       struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+                                      const struct request_sock *req,
+                                      bool *strict);
+       __u32 (*init_seq)(const struct sk_buff *skb);
+       int (*send_synack)(struct sock *sk, struct dst_entry *dst,
+                          struct flowi *fl, struct request_sock *req,
+                          u16 queue_mapping, struct tcp_fastopen_cookie *foc);
+       void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
+                              const unsigned long timeout);
 };
 
+#ifdef CONFIG_SYN_COOKIES
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+                                        struct sock *sk, struct sk_buff *skb,
+                                        __u16 *mss)
+{
+       return ops->cookie_init_seq(sk, skb, mss);
+}
+#else
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+                                        struct sock *sk, struct sk_buff *skb,
+                                        __u16 *mss)
+{
+       return 0;
+}
+#endif
+
 int tcpv4_offload_init(void);
 
 void tcp_v4_init(void);
index 68a1fefe3dfe46c3fc8f4847fa074fdb237573bb..70f941368ace488dc93f4e78f81b1dca935cd87f 100644 (file)
@@ -176,6 +176,35 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*)(const struct sock *, const struct sock *),
                     unsigned int hash2_nulladdr);
 
+static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+                                      int min, int max, bool use_eth)
+{
+       u32 hash;
+
+       if (min >= max) {
+               /* Use default range */
+               inet_get_local_port_range(net, &min, &max);
+       }
+
+       hash = skb_get_hash(skb);
+       if (unlikely(!hash) && use_eth) {
+               /* Can't find a normal hash, caller has indicated an Ethernet
+                * packet so use that to compute a hash.
+                */
+               hash = jhash(skb->data, 2 * ETH_ALEN,
+                            (__force u32) skb->protocol);
+       }
+
+       /* Since this is being sent on the wire obfuscate hash a bit
+        * to minimize possbility that any useful information to an
+        * attacker is leaked. Only upper 16 bits are relevant in the
+        * computation for 16 bit port value.
+        */
+       hash ^= hash << 16;
+
+       return htons((((u64) hash * (max - min)) >> 32) + min);
+}
+
 /* net/ipv4/udp.c */
 void udp_v4_early_demux(struct sk_buff *skb);
 int udp_get_port(struct sock *sk, unsigned short snum,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
new file mode 100644 (file)
index 0000000..ffd69cb
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef __NET_UDP_TUNNEL_H
+#define __NET_UDP_TUNNEL_H
+
+struct udp_port_cfg {
+       u8                      family;
+
+       /* Used only for kernel-created sockets */
+       union {
+               struct in_addr          local_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+               struct in6_addr         local_ip6;
+#endif
+       };
+
+       union {
+               struct in_addr          peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+               struct in6_addr         peer_ip6;
+#endif
+       };
+
+       __be16                  local_udp_port;
+       __be16                  peer_udp_port;
+       unsigned int            use_udp_checksums:1,
+                               use_udp6_tx_checksums:1,
+                               use_udp6_rx_checksums:1;
+};
+
+int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+                   struct socket **sockp);
+
+#endif
index 12196ce661d9e288a3d3928ccd66cefff10920a9..d5f59f3fc35df67141c8234a8741ab5b49501ad1 100644 (file)
@@ -45,8 +45,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
-
 /* IP header + UDP + VXLAN + Ethernet header */
 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
 /* IPv6 header + UDP + VXLAN + Ethernet header */
index 813d11f549774aadf5f3d87ba28be840e7f6e399..3e4323a3918d7266efc46edb622636621f042afc 100644 (file)
@@ -92,11 +92,12 @@ struct can_ctrlmode {
 };
 
 #define CAN_CTRLMODE_LOOPBACK          0x01    /* Loopback mode */
-#define CAN_CTRLMODE_LISTENONLY                0x02    /* Listen-only mode */
+#define CAN_CTRLMODE_LISTENONLY                0x02    /* Listen-only mode */
 #define CAN_CTRLMODE_3_SAMPLES         0x04    /* Triple sampling mode */
 #define CAN_CTRLMODE_ONE_SHOT          0x08    /* One-Shot mode */
 #define CAN_CTRLMODE_BERR_REPORTING    0x10    /* Bus-error reporting */
 #define CAN_CTRLMODE_FD                        0x20    /* CAN FD mode */
+#define CAN_CTRLMODE_PRESUME_ACK       0x40    /* Ignore missing CAN ACKs */
 
 /*
  * CAN device statistics
index b38534895db5608b9ddad5be74f50e989263277c..ff957604a7213136d72bf6ec0b34989e7637ccc4 100644 (file)
@@ -204,11 +204,17 @@ enum {
        IFLA_INET6_CACHEINFO,   /* time values and max reasm size */
        IFLA_INET6_ICMP6STATS,  /* statistics (icmpv6)          */
        IFLA_INET6_TOKEN,       /* device token                 */
+       IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
        __IFLA_INET6_MAX
 };
 
 #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1)
 
+enum in6_addr_gen_mode {
+       IN6_ADDR_GEN_MODE_EUI64,
+       IN6_ADDR_GEN_MODE_NONE,
+};
+
 enum {
        BRIDGE_MODE_UNSPEC,
        BRIDGE_MODE_HAIRPIN,
index 0d8e0f0342dc183acd5393e11bfe203f019ca165..22b7a69619d87446904480342ede77cdb295164c 100644 (file)
@@ -233,6 +233,7 @@ struct in6_flowlabel_req {
 #if 0  /* not yet */
 #define IPV6_USE_MIN_MTU       63
 #endif
+#define IPV6_AUTOFLOWLABEL     64
 
 /*
  * Netfilter (1)
index 593b0e32d956c931d667405c200767a122b43bd7..efa2666f4b8af10ca72c26206f82e527d7adc8e6 100644 (file)
@@ -163,6 +163,7 @@ enum {
        DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_SUPPRESS_FRAG_NDISC,
+       DEVCONF_ACCEPT_RA_FROM_LOCAL,
        DEVCONF_MAX
 };
 
index fdfbd1c17065e33fb45a3e70ebcc6d9cf6891dac..55818543342d4ec794023977f3c70842ab4917e9 100644 (file)
 #define INIT_NETDEV_GROUP      0
 
 
+/* interface name assignment types (sysfs name_assign_type attribute) */
+#define NET_NAME_UNKNOWN       0       /* unknown origin (not exposed to userspace) */
+#define NET_NAME_ENUM          1       /* enumerated by kernel */
+#define NET_NAME_PREDICTABLE   2       /* predictably named by the kernel */
+#define NET_NAME_USER          3       /* provided by user-space */
+#define NET_NAME_RENAMED       4       /* renamed by user-space */
 
 /* Media selection options. */
 enum {
index 2a88f645a5d821c47d7a53a05dc7a0e083a72342..801bdd1e56e33b168b54705e0bfb8464e076f3b5 100644 (file)
@@ -697,6 +697,8 @@ enum nft_counter_attributes {
  * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
  * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
  * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ * @NFTA_LOG_LEVEL: log level (NLA_U32)
+ * @NFTA_LOG_FLAGS: logging flags (NLA_U32)
  */
 enum nft_log_attributes {
        NFTA_LOG_UNSPEC,
@@ -704,6 +706,8 @@ enum nft_log_attributes {
        NFTA_LOG_PREFIX,
        NFTA_LOG_SNAPLEN,
        NFTA_LOG_QTHRESHOLD,
+       NFTA_LOG_LEVEL,
+       NFTA_LOG_FLAGS,
        __NFTA_LOG_MAX
 };
 #define NFTA_LOG_MAX           (__NFTA_LOG_MAX - 1)
index 348717c3a22f6660b106edd712b6e95df0444b68..0fbad8ef96de9ecd00262800e4dda07564bbb13b 100644 (file)
@@ -14,6 +14,5 @@ header-y += ebt_nflog.h
 header-y += ebt_pkttype.h
 header-y += ebt_redirect.h
 header-y += ebt_stp.h
-header-y += ebt_ulog.h
 header-y += ebt_vlan.h
 header-y += ebtables.h
diff --git a/include/uapi/linux/netfilter_bridge/ebt_ulog.h b/include/uapi/linux/netfilter_bridge/ebt_ulog.h
deleted file mode 100644 (file)
index 89a6bec..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _EBT_ULOG_H
-#define _EBT_ULOG_H
-
-#include <linux/types.h>
-
-#define EBT_ULOG_DEFAULT_NLGROUP 0
-#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
-#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
-#define EBT_ULOG_PREFIX_LEN 32
-#define EBT_ULOG_MAX_QLEN 50
-#define EBT_ULOG_WATCHER "ulog"
-#define EBT_ULOG_VERSION 1
-
-struct ebt_ulog_info {
-       __u32 nlgroup;
-       unsigned int cprange;
-       unsigned int qthreshold;
-       char prefix[EBT_ULOG_PREFIX_LEN];
-};
-
-typedef struct ebt_ulog_packet_msg {
-       int version;
-       char indev[IFNAMSIZ];
-       char outdev[IFNAMSIZ];
-       char physindev[IFNAMSIZ];
-       char physoutdev[IFNAMSIZ];
-       char prefix[EBT_ULOG_PREFIX_LEN];
-       struct timeval stamp;
-       unsigned long mark;
-       unsigned int hook;
-       size_t data_len;
-       /* The complete packet, including Ethernet header and perhaps
-        * the VLAN header is appended */
-       unsigned char data[0] __attribute__
-                             ((aligned (__alignof__(struct ebt_ulog_info))));
-} ebt_ulog_packet_msg_t;
-
-#endif /* _EBT_ULOG_H */
index fb008437dde111f01db08e9f7fa442ab57e66dda..ecb291df390e584a756e057fbfc8baba9f17f63d 100644 (file)
@@ -5,7 +5,6 @@ header-y += ipt_ECN.h
 header-y += ipt_LOG.h
 header-y += ipt_REJECT.h
 header-y += ipt_TTL.h
-header-y += ipt_ULOG.h
 header-y += ipt_ah.h
 header-y += ipt_ecn.h
 header-y += ipt_ttl.h
diff --git a/include/uapi/linux/netfilter_ipv4/ipt_ULOG.h b/include/uapi/linux/netfilter_ipv4/ipt_ULOG.h
deleted file mode 100644 (file)
index 417aad2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Header file for IP tables userspace logging, Version 1.8
- *
- * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
- * 
- * Distributed under the terms of GNU GPL */
-
-#ifndef _IPT_ULOG_H
-#define _IPT_ULOG_H
-
-#ifndef NETLINK_NFLOG
-#define NETLINK_NFLOG  5
-#endif
-
-#define ULOG_DEFAULT_NLGROUP   1
-#define ULOG_DEFAULT_QTHRESHOLD        1
-
-#define ULOG_MAC_LEN   80
-#define ULOG_PREFIX_LEN        32
-
-#define ULOG_MAX_QLEN  50
-/* Why 50? Well... there is a limit imposed by the slab cache 131000
- * bytes. So the multipart netlink-message has to be < 131000 bytes.
- * Assuming a standard ethernet-mtu of 1500, we could define this up
- * to 80... but even 50 seems to be big enough. */
-
-/* private data structure for each rule with a ULOG target */
-struct ipt_ulog_info {
-       unsigned int nl_group;
-       size_t copy_range;
-       size_t qthreshold;
-       char prefix[ULOG_PREFIX_LEN];
-};
-
-/* Format of the ULOG packets passed through netlink */
-typedef struct ulog_packet_msg {
-       unsigned long mark;
-       long timestamp_sec;
-       long timestamp_usec;
-       unsigned int hook;
-       char indev_name[IFNAMSIZ];
-       char outdev_name[IFNAMSIZ];
-       size_t data_len;
-       char prefix[ULOG_PREFIX_LEN];
-       unsigned char mac_len;
-       unsigned char mac[ULOG_MAC_LEN];
-       unsigned char payload[0];
-} ulog_packet_msg_t;
-
-#endif /*_IPT_ULOG_H*/
index be9519b52bb10edef5e5be12ddd3ff2065d706ac..f1db15b9c041ccad0f13a4206e73245b59e7c236 100644 (file)
@@ -1591,6 +1591,9 @@ enum nl80211_commands {
  *     creation then the new interface will be owned by the netlink socket
  *     that created it and will be destroyed when the socket is closed
  *
+ * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
+ *     the TDLS link initiator.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1931,6 +1934,8 @@ enum nl80211_attrs {
        NL80211_ATTR_CSA_C_OFFSETS_TX,
        NL80211_ATTR_MAX_CSA_COUNTERS,
 
+       NL80211_ATTR_TDLS_INITIATOR,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
index 266022a2be4acae990ed56c3efe783766d667d6e..ce70fe6b45df3e841c35accbdb6379c16563893c 100644 (file)
@@ -95,6 +95,9 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_GET_ASSOC_ID_LIST 29      /* Read only */
 #define SCTP_AUTO_ASCONF       30
 #define SCTP_PEER_ADDR_THLDS   31
+#define SCTP_RECVRCVINFO       32
+#define SCTP_RECVNXTINFO       33
+#define SCTP_DEFAULT_SNDINFO   34
 
 /* Internal Socket Options. Some of the sctp library functions are
  * implemented using these socket options.
@@ -110,8 +113,14 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_SOCKOPT_CONNECTX3 111     /* CONNECTX requests (updated) */
 #define SCTP_GET_ASSOC_STATS   112     /* Read only */
 
-/*
- * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
+/* These are bit fields for msghdr->msg_flags.  See section 5.1.  */
+/* On user space Linux, these live in <bits/socket.h> as an enum.  */
+enum sctp_msg_flags {
+       MSG_NOTIFICATION = 0x8000,
+#define MSG_NOTIFICATION MSG_NOTIFICATION
+};
+
+/* 5.3.1 SCTP Initiation Structure (SCTP_INIT)
  *
  *   This cmsghdr structure provides information for initializing new
  *   SCTP associations with sendmsg().  The SCTP_INITMSG socket option
@@ -121,7 +130,6 @@ typedef __s32 sctp_assoc_t;
  *   cmsg_level    cmsg_type      cmsg_data[]
  *   ------------  ------------   ----------------------
  *   IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
- *
  */
 struct sctp_initmsg {
        __u16 sinit_num_ostreams;
@@ -130,8 +138,7 @@ struct sctp_initmsg {
        __u16 sinit_max_init_timeo;
 };
 
-/*
- * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+/* 5.3.2 SCTP Header Information Structure (SCTP_SNDRCV)
  *
  *   This cmsghdr structure specifies SCTP options for sendmsg() and
  *   describes SCTP header information about a received message through
@@ -140,7 +147,6 @@ struct sctp_initmsg {
  *   cmsg_level    cmsg_type      cmsg_data[]
  *   ------------  ------------   ----------------------
  *   IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
- *
  */
 struct sctp_sndrcvinfo {
        __u16 sinfo_stream;
@@ -154,19 +160,74 @@ struct sctp_sndrcvinfo {
        sctp_assoc_t sinfo_assoc_id;
 };
 
+/* 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+ *
+ *   This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
+ */
+struct sctp_sndinfo {
+       __u16 snd_sid;
+       __u16 snd_flags;
+       __u32 snd_ppid;
+       __u32 snd_context;
+       sctp_assoc_t snd_assoc_id;
+};
+
+/* 5.3.5 SCTP Receive Information Structure (SCTP_RCVINFO)
+ *
+ *   This cmsghdr structure describes SCTP receive information
+ *   about a received message through recvmsg().
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_RCVINFO   struct sctp_rcvinfo
+ */
+struct sctp_rcvinfo {
+       __u16 rcv_sid;
+       __u16 rcv_ssn;
+       __u16 rcv_flags;
+       __u32 rcv_ppid;
+       __u32 rcv_tsn;
+       __u32 rcv_cumtsn;
+       __u32 rcv_context;
+       sctp_assoc_t rcv_assoc_id;
+};
+
+/* 5.3.6 SCTP Next Receive Information Structure (SCTP_NXTINFO)
+ *
+ *   This cmsghdr structure describes SCTP receive information
+ *   of the next message that will be delivered through recvmsg()
+ *   if this information is already available when delivering
+ *   the current message.
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_NXTINFO   struct sctp_nxtinfo
+ */
+struct sctp_nxtinfo {
+       __u16 nxt_sid;
+       __u16 nxt_flags;
+       __u32 nxt_ppid;
+       __u32 nxt_length;
+       sctp_assoc_t nxt_assoc_id;
+};
+
 /*
  *  sinfo_flags: 16 bits (unsigned integer)
  *
  *   This field may contain any of the following flags and is composed of
  *   a bitwise OR of these values.
  */
-
 enum sctp_sinfo_flags {
-       SCTP_UNORDERED = 1,  /* Send/receive message unordered. */
-       SCTP_ADDR_OVER = 2,  /* Override the primary destination. */
-       SCTP_ABORT=4,        /* Send an ABORT message to the peer. */
-       SCTP_SACK_IMMEDIATELY = 8,      /* SACK should be sent without delay */
-       SCTP_EOF=MSG_FIN,    /* Initiate graceful shutdown process. */
+       SCTP_UNORDERED          = (1 << 0), /* Send/receive message unordered. */
+       SCTP_ADDR_OVER          = (1 << 1), /* Override the primary destination. */
+       SCTP_ABORT              = (1 << 2), /* Send an ABORT message to the peer. */
+       SCTP_SACK_IMMEDIATELY   = (1 << 3), /* SACK should be sent without delay. */
+       SCTP_NOTIFICATION       = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
+       SCTP_EOF                = MSG_FIN,  /* Initiate graceful shutdown process. */
 };
 
 typedef union {
@@ -177,10 +238,16 @@ typedef union {
 
 /* These are cmsg_types.  */
 typedef enum sctp_cmsg_type {
-       SCTP_INIT,              /* 5.2.1 SCTP Initiation Structure */
+       SCTP_INIT,              /* 5.2.1 SCTP Initiation Structure */
 #define SCTP_INIT      SCTP_INIT
-       SCTP_SNDRCV,            /* 5.2.2 SCTP Header Information Structure */
+       SCTP_SNDRCV,            /* 5.2.2 SCTP Header Information Structure */
 #define SCTP_SNDRCV    SCTP_SNDRCV
+       SCTP_SNDINFO,           /* 5.3.4 SCTP Send Information Structure */
+#define SCTP_SNDINFO   SCTP_SNDINFO
+       SCTP_RCVINFO,           /* 5.3.5 SCTP Receive Information Structure */
+#define SCTP_RCVINFO   SCTP_RCVINFO
+       SCTP_NXTINFO,           /* 5.3.6 SCTP Next Receive Information Structure */
+#define SCTP_NXTINFO   SCTP_NXTINFO
 } sctp_cmsg_t;
 
 /*
@@ -808,13 +875,6 @@ struct sctp_assoc_stats {
        __u64           sas_ictrlchunks; /* Control chunks received */
 };
 
-/* These are bit fields for msghdr->msg_flags.  See section 5.1.  */
-/* On user space Linux, these live in <bits/socket.h> as an enum.  */
-enum sctp_msg_flags {
-       MSG_NOTIFICATION = 0x8000,
-#define MSG_NOTIFICATION MSG_NOTIFICATION
-};
-
 /*
  * 8.1 sctp_bindx()
  *
index 6d6721341f498656e2dc8996326d3b561e3f9581..43aaba1cc0372c050f5d5caf0903609ed46f73e0 100644 (file)
@@ -568,6 +568,7 @@ enum {
        NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
        NET_IPV6_PROXY_NDP=23,
        NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
+       NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
        __NET_IPV6_MAX
 };
 
index 41a76acbb305f85cb4cb0ec6dfab9cab1e20e1d4..876d0a14863c391e801ad9f636973f1470769c77 100644 (file)
 
 #define TIPC_MIN_LINK_WIN 16
 #define TIPC_DEF_LINK_WIN 50
-#define TIPC_MAX_LINK_WIN 150
+#define TIPC_MAX_LINK_WIN 8191
 
 
 struct tipc_node_info {
index 653cbbd9e7ad4dcb1ee0647c6d11cc453b1e81a9..e4ba9a5a5ccb45be2a5291b0f0848f90f73c0851 100644 (file)
@@ -522,6 +522,7 @@ static const struct bin_table bin_net_ipv6_conf_var_table[] = {
        { CTL_INT,      NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN,    "accept_ra_rt_info_max_plen" },
        { CTL_INT,      NET_IPV6_PROXY_NDP,                     "proxy_ndp" },
        { CTL_INT,      NET_IPV6_ACCEPT_SOURCE_ROUTE,           "accept_source_route" },
+       { CTL_INT,      NET_IPV6_ACCEPT_RA_FROM_LOCAL,          "accept_ra_from_local" },
        {}
 };
 
index 21a7b2135af6bf29e9d75b82b95da7c03381ed8b..9a907d489d9517e7cb86e5e33f19f47285c4cac1 100644 (file)
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
 MODULE_DESCRIPTION("Various CRC32 calculations");
 MODULE_LICENSE("GPL");
 
-#define GF2_DIM                32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
-       u32 sum = 0;
-
-       while (vec) {
-               if (vec & 1)
-                       sum ^= *mat;
-               vec >>= 1;
-               mat++;
-       }
-
-       return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
-       int i;
-
-       for (i = 0; i < GF2_DIM; i++)
-               square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
 #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
 
 /* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 }
 #endif
 
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
-                                u32 polynomial)
-{
-       u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
-       u32 odd[GF2_DIM];  /* Odd-power-of-two zeros operator  */
-       u32 row;
-       int i;
-
-       if (len2 <= 0)
-               return crc1;
-
-       /* Put operator for one zero bit in odd */
-       odd[0] = polynomial;
-       row = 1;
-       for (i = 1; i < GF2_DIM; i++) {
-               odd[i] = row;
-               row <<= 1;
-       }
-
-       gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
-       gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
-       /* Apply len2 zeros to crc1 (first square will put the operator for one
-        * zero byte, eight zero bits, in even).
-        */
-       do {
-               /* Apply zeros operator for this bit of len2 */
-               gf2_matrix_square(even, odd);
-               if (len2 & 1)
-                       crc1 = gf2_matrix_times(even, crc1);
-               len2 >>= 1;
-               /* If no more bits set, then done */
-               if (len2 == 0)
-                       break;
-               /* Another iteration of the loop with odd and even swapped */
-               gf2_matrix_square(odd, even);
-               if (len2 & 1)
-                       crc1 = gf2_matrix_times(odd, crc1);
-               len2 >>= 1;
-       } while (len2 != 0);
-
-       crc1 ^= crc2;
-       return crc1;
-}
 
 /**
  * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
                        (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
 {
-       return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+       u32 product = x & 1 ? y : 0;
+       int i;
+
+       for (i = 0; i < 31; i++) {
+               product = (product >> 1) ^ (product & 1 ? modulus : 0);
+               x >>= 1;
+               product ^= x & 1 ? y : 0;
+       }
+
+       return product;
 }
 
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+                                                  u32 polynomial)
 {
-       return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+       u32 power = polynomial; /* CRC of x^32 */
+       int i;
+
+       /* Shift up to 32 bits in the simple linear way */
+       for (i = 0; i < 8 * (int)(len & 3); i++)
+               crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+       len >>= 2;
+       if (!len)
+               return crc;
+
+       for (;;) {
+               /* "power" is x^(2^i), modulo the polynomial */
+               if (len & 1)
+                       crc = gf2_multiply(crc, power, polynomial);
+
+               len >>= 1;
+               if (!len)
+                       break;
+
+               /* Square power, advancing to x^(2^(i+1)) */
+               power = gf2_multiply(power, power, polynomial);
+       }
+
+       return crc;
 }
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+       return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+       return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
 
 /**
  * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
 #ifdef CONFIG_CRC32_SELFTEST
 
 /* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
 {
        0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
        0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
        u32 crc_le;     /* expected crc32_le result */
        u32 crc_be;     /* expected crc32_be result */
        u32 crc32c_le;  /* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
 {
        {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
        {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
index 7288e38e17575952664af1df3a69ba488812c8e3..c9afbe2c445af155e2d2912ca362efae4ce02950 100644 (file)
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
                char buf[PREFIX_SIZE];
 
                res = dev_printk_emit(7, dev->dev.parent,
-                                     "%s%s %s %s: %pV",
+                                     "%s%s %s %s%s: %pV",
                                      dynamic_emit_prefix(descriptor, buf),
                                      dev_driver_string(dev->dev.parent),
                                      dev_name(dev->dev.parent),
-                                     netdev_name(dev), &vaf);
+                                     netdev_name(dev), netdev_reg_state(dev),
+                                     &vaf);
        } else if (dev) {
-               res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+               res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+                            netdev_reg_state(dev), &vaf);
        } else {
                res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
        }
index 2e3c52c8d050b5ecf21afcd03c5f6d689e99c282..148fc6e99ef63b351977bda6a3b4835e9d24083f 100644 (file)
@@ -3,24 +3,24 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
 {
        int i;
 
        /* XX:XX:XX:XX:XX:XX */
        if (strlen(s) < 3 * ETH_ALEN - 1)
-               return 0;
+               return false;
 
        /* Don't dirty result unless string is valid MAC. */
        for (i = 0; i < ETH_ALEN; i++) {
                if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
-                       return 0;
+                       return false;
                if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
-                       return 0;
+                       return false;
        }
        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
        }
-       return 1;
+       return true;
 }
 EXPORT_SYMBOL(mac_pton);
index 05eea6b98bb85ec54b4ad86f5f4e9d0650068588..7c174b6750cd654c49d7c1d2f621eb9fc3ed1ef0 100644 (file)
@@ -126,6 +126,6 @@ static void fc_setup(struct net_device *dev)
  */
 struct net_device *alloc_fcdev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "fc%d", fc_setup);
+       return alloc_netdev(sizeof_priv, "fc%d", NET_NAME_UNKNOWN, fc_setup);
 }
 EXPORT_SYMBOL(alloc_fcdev);
index 9cda40661e0d300112008fab1c633bb938bac4b6..59e7346f1193a612f9118b4df85426406aea5549 100644 (file)
@@ -207,7 +207,8 @@ static void fddi_setup(struct net_device *dev)
  */
 struct net_device *alloc_fddidev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "fddi%d", fddi_setup);
+       return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN,
+                           fddi_setup);
 }
 EXPORT_SYMBOL(alloc_fddidev);
 
index 5ff2a718ddcac1624809f563f5dfc12d5cad46f9..2e03f8259dd55a575f1b147987631cdced3a91a9 100644 (file)
@@ -228,7 +228,8 @@ static void hippi_setup(struct net_device *dev)
 
 struct net_device *alloc_hippi_dev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "hip%d", hippi_setup);
+       return alloc_netdev(sizeof_priv, "hip%d", NET_NAME_UNKNOWN,
+                           hippi_setup);
 }
 
 EXPORT_SYMBOL(alloc_hippi_dev);
index 44ebd5c2cd4aef0f86bd6475132cc40c501c6fef..cba9c212a730059be075a93eedbd825aea6a696a 100644 (file)
@@ -250,7 +250,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
+                              NET_NAME_UNKNOWN, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
index dd11f612e03e42684a6732dd84aee0995415c710..35a6b6b15e8a1d9e2f846b18aa8c448ad0e915fd 100644 (file)
@@ -385,6 +385,8 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
+       case SIOCSHWTSTAMP:
+       case SIOCGHWTSTAMP:
                if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
                        err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
                break;
index 0004cbaac4a41d9737e017e257c9083df4969b8a..e86a9bea1d160ccc1a739eee576d0bdbf0483956 100644 (file)
@@ -959,7 +959,6 @@ static int p9_client_version(struct p9_client *c)
                break;
        default:
                return -EINVAL;
-               break;
        }
 
        if (IS_ERR(req))
index bfcf6be1d665c89e3f88c4f9e34c452a03ced57d..c00897f65a31eb2a7f9cf8fa8da71bfd56cc7fc5 100644 (file)
@@ -1805,7 +1805,7 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                long amount = 0;
 
                if (skb)
-               amount = skb->len - sizeof(struct ddpehdr);
+                       amount = skb->len - sizeof(struct ddpehdr);
                rc = put_user(amount, (int __user *)argp);
                break;
        }
index 6c8016f618661d931467bf2a005ee1933eec1797..e4158b8b926d39c725e6fc1b644360e5fe27f965 100644 (file)
@@ -39,6 +39,7 @@ static void ltalk_setup(struct net_device *dev)
 
 struct net_device *alloc_ltalkdev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "lt%d", ltalk_setup);
+       return alloc_netdev(sizeof_priv, "lt%d", NET_NAME_UNKNOWN,
+                           ltalk_setup);
 }
 EXPORT_SYMBOL(alloc_ltalkdev);
index 403e71fa88feb900a7d194f3f3c83f4f6e1cb0d4..cc78538d163bbf05bd6bcf5aa5c8c4954fe8ac66 100644 (file)
@@ -682,8 +682,8 @@ static int br2684_create(void __user *arg)
 
        netdev = alloc_netdev(sizeof(struct br2684_dev),
                              ni.ifname[0] ? ni.ifname : "nas%d",
-                             (payload == p_routed) ?
-                             br2684_setup_routed : br2684_setup);
+                             NET_NAME_UNKNOWN,
+                             (payload == p_routed) ? br2684_setup_routed : br2684_setup);
        if (!netdev)
                return -ENOMEM;
 
index ba291ce4bdff95c59b02a9eeaf9de87dd0a7315b..46339040fef014771e504803eef23356c6a1a9f4 100644 (file)
@@ -520,7 +520,8 @@ static int clip_create(int number)
                        if (PRIV(dev)->number >= number)
                                number = PRIV(dev)->number + 1;
        }
-       dev = alloc_netdev(sizeof(struct clip_priv), "", clip_setup);
+       dev = alloc_netdev(sizeof(struct clip_priv), "", NET_NAME_UNKNOWN,
+                          clip_setup);
        if (!dev)
                return -ENOMEM;
        clip_priv = PRIV(dev);
index cbd677f48c00541fc8ff9aed5b0943d3855b1810..e0a723991c54c214dc12c7bed679c4195d71e8b9 100644 (file)
@@ -927,7 +927,7 @@ struct net_device *batadv_softif_create(const char *name)
        int ret;
 
        soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
-                                 batadv_softif_init_early);
+                                 NET_NAME_UNKNOWN, batadv_softif_init_early);
        if (!soft_iface)
                return NULL;
 
index fc47baa888c54896c6ccde6352202736d3c9ba6b..f40cb0436eba1ece028c9852d5f69b9c504f62b4 100644 (file)
@@ -900,32 +900,24 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 
        bat_kobj = &bat_priv->soft_iface->dev.kobj;
 
-       uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
-                               strlen(batadv_uev_type_str[type]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[0] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_TYPE_VAR,
+                                 batadv_uev_type_str[type]);
        if (!uevent_env[0])
                goto out;
 
-       sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
-               batadv_uev_type_str[type]);
-
-       uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
-                               strlen(batadv_uev_action_str[action]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[1] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_ACTION_VAR,
+                                 batadv_uev_action_str[action]);
        if (!uevent_env[1])
                goto out;
 
-       sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
-               batadv_uev_action_str[action]);
-
        /* If the event is DEL, ignore the data field */
        if (action != BATADV_UEV_DEL) {
-               uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
-                                       strlen(data) + 1, GFP_ATOMIC);
+               uevent_env[2] = kasprintf(GFP_ATOMIC,
+                                         "%s%s", BATADV_UEV_DATA_VAR, data);
                if (!uevent_env[2])
                        goto out;
-
-               sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
        }
 
        ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
index 8796ffa08b43b4f57ae1485f4a5867c0f532807c..206b65ccd5b8bd318e554e452966fae1c582112f 100644 (file)
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2013 Intel Corp.
+   Copyright (c) 2013-2014 Intel Corp.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,8 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
 
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
-#include "6lowpan.h"
-
 #include <net/6lowpan.h> /* for the compression support */
 
+#define VERSION "0.1"
+
+static struct dentry *lowpan_psm_debugfs;
+static struct dentry *lowpan_control_debugfs;
+
 #define IFACE_NAME_TEMPLATE "bt%d"
 #define EUI64_ADDR_LEN 8
 
 struct skb_cb {
        struct in6_addr addr;
-       struct l2cap_conn *conn;
+       struct l2cap_chan *chan;
+       int status;
 };
 #define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
 
@@ -48,9 +54,19 @@ struct skb_cb {
 static LIST_HEAD(bt_6lowpan_devices);
 static DEFINE_RWLOCK(devices_lock);
 
+/* If psm is set to 0 (default value), then 6lowpan is disabled.
+ * Other values are used to indicate a Protocol Service Multiplexer
+ * value for 6lowpan.
+ */
+static u16 psm_6lowpan;
+
+/* We are listening incoming connections via this channel
+ */
+static struct l2cap_chan *listen_chan;
+
 struct lowpan_peer {
        struct list_head list;
-       struct l2cap_conn *conn;
+       struct l2cap_chan *chan;
 
        /* peer addresses in various formats */
        unsigned char eui64_addr[EUI64_ADDR_LEN];
@@ -84,6 +100,8 @@ static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
 {
        list_del(&peer->list);
 
+       module_put(THIS_MODULE);
+
        if (atomic_dec_and_test(&dev->peer_count)) {
                BT_DBG("last peer");
                return true;
@@ -101,13 +119,26 @@ static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
               ba, type);
 
        list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
-               BT_DBG("addr %pMR type %d",
-                      &peer->conn->hcon->dst, peer->conn->hcon->dst_type);
+               BT_DBG("dst addr %pMR dst type %d",
+                      &peer->chan->dst, peer->chan->dst_type);
 
-               if (bacmp(&peer->conn->hcon->dst, ba))
+               if (bacmp(&peer->chan->dst, ba))
                        continue;
 
-               if (type == peer->conn->hcon->dst_type)
+               if (type == peer->chan->dst_type)
+                       return peer;
+       }
+
+       return NULL;
+}
+
+static inline struct lowpan_peer *peer_lookup_chan(struct lowpan_dev *dev,
+                                                  struct l2cap_chan *chan)
+{
+       struct lowpan_peer *peer, *tmp;
+
+       list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+               if (peer->chan == chan)
                        return peer;
        }
 
@@ -120,7 +151,7 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
        struct lowpan_peer *peer, *tmp;
 
        list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
-               if (peer->conn == conn)
+               if (peer->chan->conn == conn)
                        return peer;
        }
 
@@ -176,16 +207,16 @@ static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
                return -ENOMEM;
 
        ret = netif_rx(skb_cp);
-
-       BT_DBG("receive skb %d", ret);
-       if (ret < 0)
+       if (ret < 0) {
+               BT_DBG("receive skb %d", ret);
                return NET_RX_DROP;
+       }
 
        return ret;
 }
 
 static int process_data(struct sk_buff *skb, struct net_device *netdev,
-                       struct l2cap_conn *conn)
+                       struct l2cap_chan *chan)
 {
        const u8 *saddr, *daddr;
        u8 iphc0, iphc1;
@@ -196,7 +227,7 @@ static int process_data(struct sk_buff *skb, struct net_device *netdev,
        dev = lowpan_dev(netdev);
 
        read_lock_irqsave(&devices_lock, flags);
-       peer = peer_lookup_conn(dev, conn);
+       peer = peer_lookup_chan(dev, chan);
        read_unlock_irqrestore(&devices_lock, flags);
        if (!peer)
                goto drop;
@@ -225,7 +256,7 @@ drop:
 }
 
 static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
-                   struct l2cap_conn *conn)
+                   struct l2cap_chan *chan)
 {
        struct sk_buff *local_skb;
        int ret;
@@ -269,7 +300,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                        if (!local_skb)
                                goto drop;
 
-                       ret = process_data(local_skb, dev, conn);
+                       ret = process_data(local_skb, dev, chan);
                        if (ret != NET_RX_SUCCESS)
                                goto drop;
 
@@ -286,147 +317,39 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
        return NET_RX_SUCCESS;
 
 drop:
+       dev->stats.rx_dropped++;
        kfree_skb(skb);
        return NET_RX_DROP;
 }
 
 /* Packet from BT LE device */
-int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        int err;
 
-       peer = lookup_peer(conn);
+       peer = lookup_peer(chan->conn);
        if (!peer)
                return -ENOENT;
 
-       dev = lookup_dev(conn);
+       dev = lookup_dev(chan->conn);
        if (!dev || !dev->netdev)
                return -ENOENT;
 
-       err = recv_pkt(skb, dev->netdev, conn);
-       BT_DBG("recv pkt %d", err);
-
-       return err;
-}
-
-static inline int skbuff_copy(void *msg, int len, int count, int mtu,
-                             struct sk_buff *skb, struct net_device *dev)
-{
-       struct sk_buff **frag;
-       int sent = 0;
-
-       memcpy(skb_put(skb, count), msg, count);
-
-       sent += count;
-       msg  += count;
-       len  -= count;
-
-       dev->stats.tx_bytes += count;
-       dev->stats.tx_packets++;
-
-       raw_dump_table(__func__, "Sending", skb->data, skb->len);
-
-       /* Continuation fragments (no L2CAP header) */
-       frag = &skb_shinfo(skb)->frag_list;
-       while (len > 0) {
-               struct sk_buff *tmp;
-
-               count = min_t(unsigned int, mtu, len);
-
-               tmp = bt_skb_alloc(count, GFP_ATOMIC);
-               if (!tmp)
-                       return -ENOMEM;
-
-               *frag = tmp;
-
-               memcpy(skb_put(*frag, count), msg, count);
-
-               raw_dump_table(__func__, "Sending fragment",
-                              (*frag)->data, count);
-
-               (*frag)->priority = skb->priority;
-
-               sent += count;
-               msg  += count;
-               len  -= count;
-
-               skb->len += (*frag)->len;
-               skb->data_len += (*frag)->len;
-
-               frag = &(*frag)->next;
-
-               dev->stats.tx_bytes += count;
-               dev->stats.tx_packets++;
+       err = recv_pkt(skb, dev->netdev, chan);
+       if (err) {
+               BT_DBG("recv pkt %d", err);
+               err = -EAGAIN;
        }
 
-       return sent;
-}
-
-static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
-                                 size_t len, u32 priority,
-                                 struct net_device *dev)
-{
-       struct sk_buff *skb;
-       int err, count;
-       struct l2cap_hdr *lh;
-
-       /* FIXME: This mtu check should be not needed and atm is only used for
-        * testing purposes
-        */
-       if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
-               conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;
-
-       count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
-
-       BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);
-
-       skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
-       if (!skb)
-               return ERR_PTR(-ENOMEM);
-
-       skb->priority = priority;
-
-       lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
-       lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
-       lh->len = cpu_to_le16(len);
-
-       err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
-       if (unlikely(err < 0)) {
-               kfree_skb(skb);
-               BT_DBG("skbuff copy %d failed", err);
-               return ERR_PTR(err);
-       }
-
-       return skb;
-}
-
-static int conn_send(struct l2cap_conn *conn,
-                    void *msg, size_t len, u32 priority,
-                    struct net_device *dev)
-{
-       struct sk_buff *skb;
-
-       skb = create_pdu(conn, msg, len, priority, dev);
-       if (IS_ERR(skb))
-               return -EINVAL;
-
-       BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
-              skb->priority);
-
-       hci_send_acl(conn->hchan, skb, ACL_START);
-
-       return 0;
+       return err;
 }
 
 static u8 get_addr_type_from_eui64(u8 byte)
 {
-       /* Is universal(0) or local(1) bit,  */
-       if (byte & 0x02)
-               return ADDR_LE_DEV_RANDOM;
-
-       return ADDR_LE_DEV_PUBLIC;
+       /* Is universal(0) or local(1) bit */
+       return ((byte & 0x02) ? BDADDR_LE_RANDOM : BDADDR_LE_PUBLIC);
 }
 
 static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
@@ -475,7 +398,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
        if (ipv6_addr_is_multicast(&hdr->daddr)) {
                memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
                       sizeof(struct in6_addr));
-               lowpan_cb(skb)->conn = NULL;
+               lowpan_cb(skb)->chan = NULL;
        } else {
                unsigned long flags;
 
@@ -484,9 +407,8 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
                 */
                convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
 
-               BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
-                      addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
-                      &hdr->daddr);
+               BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
+                      addr_type, &hdr->daddr);
 
                read_lock_irqsave(&devices_lock, flags);
                peer = peer_lookup_ba(dev, &addr, addr_type);
@@ -501,7 +423,7 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 
                memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
                       sizeof(struct in6_addr));
-               lowpan_cb(skb)->conn = peer->conn;
+               lowpan_cb(skb)->chan = peer->chan;
        }
 
        saddr = dev->netdev->dev_addr;
@@ -510,14 +432,42 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
 }
 
 /* Packet to BT LE device */
-static int send_pkt(struct l2cap_conn *conn, const void *saddr,
-                   const void *daddr, struct sk_buff *skb,
+static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
                    struct net_device *netdev)
 {
-       raw_dump_table(__func__, "raw skb data dump before fragmentation",
-                      skb->data, skb->len);
+       struct msghdr msg;
+       struct kvec iv;
+       int err;
+
+       /* Remember the skb so that we can send EAGAIN to the caller if
+        * we run out of credits.
+        */
+       chan->data = skb;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.msg_iov = (struct iovec *) &iv;
+       msg.msg_iovlen = 1;
+       iv.iov_base = skb->data;
+       iv.iov_len = skb->len;
+
+       err = l2cap_chan_send(chan, &msg, skb->len);
+       if (err > 0) {
+               netdev->stats.tx_bytes += err;
+               netdev->stats.tx_packets++;
+               return 0;
+       }
+
+       if (!err)
+               err = lowpan_cb(skb)->status;
 
-       return conn_send(conn, skb->data, skb->len, 0, netdev);
+       if (err < 0) {
+               if (err == -EAGAIN)
+                       netdev->stats.tx_dropped++;
+               else
+                       netdev->stats.tx_errors++;
+       }
+
+       return err;
 }
 
 static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
@@ -540,8 +490,7 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
                list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
                        local_skb = skb_clone(skb, GFP_ATOMIC);
 
-                       send_pkt(pentry->conn, netdev->dev_addr,
-                                pentry->eui64_addr, local_skb, netdev);
+                       send_pkt(pentry->chan, local_skb, netdev);
 
                        kfree_skb(local_skb);
                }
@@ -553,7 +502,6 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
 static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        int err = 0;
-       unsigned char *eui64_addr;
        struct lowpan_dev *dev;
        struct lowpan_peer *peer;
        bdaddr_t addr;
@@ -568,21 +516,20 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
                unsigned long flags;
 
                convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
-               eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
                dev = lowpan_dev(netdev);
 
                read_lock_irqsave(&devices_lock, flags);
                peer = peer_lookup_ba(dev, &addr, addr_type);
                read_unlock_irqrestore(&devices_lock, flags);
 
-               BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
-                      netdev->name, &addr,
-                      addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
+               BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
+                      netdev->name, &addr, addr_type,
                       &lowpan_cb(skb)->addr, peer);
 
-               if (peer && peer->conn)
-                       err = send_pkt(peer->conn, netdev->dev_addr,
-                                      eui64_addr, skb, netdev);
+               if (peer && peer->chan)
+                       err = send_pkt(peer->chan, skb, netdev);
+               else
+                       err = -ENOENT;
        }
        dev_kfree_skb(skb);
 
@@ -634,7 +581,7 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
        eui[7] = addr[0];
 
        /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
-       if (addr_type == ADDR_LE_DEV_PUBLIC)
+       if (addr_type == BDADDR_LE_PUBLIC)
                eui[0] &= ~0x02;
        else
                eui[0] |= 0x02;
@@ -660,6 +607,17 @@ static void ifup(struct net_device *netdev)
        rtnl_unlock();
 }
 
+static void ifdown(struct net_device *netdev)
+{
+       int err;
+
+       rtnl_lock();
+       err = dev_close(netdev);
+       if (err < 0)
+               BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
+       rtnl_unlock();
+}
+
 static void do_notify_peers(struct work_struct *work)
 {
        struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
@@ -673,26 +631,64 @@ static bool is_bt_6lowpan(struct hci_conn *hcon)
        if (hcon->type != LE_LINK)
                return false;
 
-       return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
+       if (!psm_6lowpan)
+               return false;
+
+       return true;
+}
+
+static struct l2cap_chan *chan_create(void)
+{
+       struct l2cap_chan *chan;
+
+       chan = l2cap_chan_create();
+       if (!chan)
+               return NULL;
+
+       l2cap_chan_set_defaults(chan);
+
+       chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
+       chan->mode = L2CAP_MODE_LE_FLOWCTL;
+       chan->omtu = 65535;
+       chan->imtu = chan->omtu;
+
+       return chan;
 }
 
-static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
+static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
+{
+       struct l2cap_chan *chan;
+
+       chan = chan_create();
+       if (!chan)
+               return NULL;
+
+       chan->remote_mps = chan->omtu;
+       chan->mps = chan->omtu;
+
+       chan->state = BT_CONNECTED;
+
+       return chan;
+}
+
+static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
+                                       struct lowpan_dev *dev)
 {
        struct lowpan_peer *peer;
        unsigned long flags;
 
        peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
        if (!peer)
-               return -ENOMEM;
+               return NULL;
 
-       peer->conn = conn;
+       peer->chan = chan;
        memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
 
        /* RFC 2464 ch. 5 */
        peer->peer_addr.s6_addr[0] = 0xFE;
        peer->peer_addr.s6_addr[1] = 0x80;
-       set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
-                conn->hcon->dst_type);
+       set_addr((u8 *)&peer->peer_addr.s6_addr + 8, chan->dst.b,
+                chan->dst_type);
 
        memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
               EUI64_ADDR_LEN);
@@ -706,40 +702,24 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
        INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
        schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));
 
-       return 0;
+       return peer->chan;
 }
 
-/* This gets called when BT LE 6LoWPAN device is connected. We then
- * create network device that acts as a proxy between BT LE device
- * and kernel network stack.
- */
-int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
 {
-       struct lowpan_peer *peer = NULL;
-       struct lowpan_dev *dev;
        struct net_device *netdev;
        int err = 0;
        unsigned long flags;
 
-       if (!is_bt_6lowpan(conn->hcon))
-               return 0;
-
-       peer = lookup_peer(conn);
-       if (peer)
-               return -EEXIST;
-
-       dev = lookup_dev(conn);
-       if (dev)
-               return add_peer_conn(conn, dev);
-
-       netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
+       netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
+                             NET_NAME_UNKNOWN, netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
-       set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);
+       set_dev_addr(netdev, &chan->src, chan->src_type);
 
        netdev->netdev_ops = &netdev_ops;
-       SET_NETDEV_DEV(netdev, &conn->hcon->dev);
+       SET_NETDEV_DEV(netdev, &chan->conn->hcon->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
        err = register_netdev(netdev);
@@ -749,28 +729,61 @@ int bt_6lowpan_add_conn(struct l2cap_conn *conn)
                goto out;
        }
 
-       BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
-              netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
+       BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
+              netdev->ifindex, &chan->dst, chan->dst_type,
+              &chan->src, chan->src_type);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);
 
-       dev = netdev_priv(netdev);
-       dev->netdev = netdev;
-       dev->hdev = conn->hcon->hdev;
-       INIT_LIST_HEAD(&dev->peers);
+       *dev = netdev_priv(netdev);
+       (*dev)->netdev = netdev;
+       (*dev)->hdev = chan->conn->hcon->hdev;
+       INIT_LIST_HEAD(&(*dev)->peers);
 
        write_lock_irqsave(&devices_lock, flags);
-       INIT_LIST_HEAD(&dev->list);
-       list_add(&dev->list, &bt_6lowpan_devices);
+       INIT_LIST_HEAD(&(*dev)->list);
+       list_add(&(*dev)->list, &bt_6lowpan_devices);
        write_unlock_irqrestore(&devices_lock, flags);
 
-       ifup(netdev);
-
-       return add_peer_conn(conn, dev);
+       return 0;
 
 out:
        return err;
 }
 
+static inline void chan_ready_cb(struct l2cap_chan *chan)
+{
+       struct lowpan_dev *dev;
+
+       dev = lookup_dev(chan->conn);
+
+       BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);
+
+       if (!dev) {
+               if (setup_netdev(chan, &dev) < 0) {
+                       l2cap_chan_del(chan, -ENOENT);
+                       return;
+               }
+       }
+
+       if (!try_module_get(THIS_MODULE))
+               return;
+
+       add_peer_chan(chan, dev);
+       ifup(dev->netdev);
+}
+
+static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan)
+{
+       struct l2cap_chan *pchan;
+
+       pchan = chan_open(chan);
+       pchan->ops = chan->ops;
+
+       BT_DBG("chan %p pchan %p", chan, pchan);
+
+       return pchan;
+}
+
 static void delete_netdev(struct work_struct *work)
 {
        struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
@@ -781,26 +794,43 @@ static void delete_netdev(struct work_struct *work)
        /* The entry pointer is deleted in device_event() */
 }
 
-int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+static void chan_close_cb(struct l2cap_chan *chan)
 {
        struct lowpan_dev *entry, *tmp;
        struct lowpan_dev *dev = NULL;
        struct lowpan_peer *peer;
        int err = -ENOENT;
        unsigned long flags;
-       bool last = false;
+       bool last = false, removed = true;
 
-       if (!conn || !is_bt_6lowpan(conn->hcon))
-               return 0;
+       BT_DBG("chan %p conn %p", chan, chan->conn);
+
+       if (chan->conn && chan->conn->hcon) {
+               if (!is_bt_6lowpan(chan->conn->hcon))
+                       return;
+
+               /* If conn is set, then the netdev is also there and we should
+                * not remove it.
+                */
+               removed = false;
+       }
 
        write_lock_irqsave(&devices_lock, flags);
 
        list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
                dev = lowpan_dev(entry->netdev);
-               peer = peer_lookup_conn(dev, conn);
+               peer = peer_lookup_chan(dev, chan);
                if (peer) {
                        last = peer_del(dev, peer);
                        err = 0;
+
+                       BT_DBG("dev %p removing %speer %p", dev,
+                              last ? "last " : "1 ", peer);
+                       BT_DBG("chan %p orig refcnt %d", chan,
+                              atomic_read(&chan->kref.refcount));
+
+                       l2cap_chan_put(chan);
+                       kfree(peer);
                        break;
                }
        }
@@ -810,18 +840,402 @@ int bt_6lowpan_del_conn(struct l2cap_conn *conn)
 
                cancel_delayed_work_sync(&dev->notify_peers);
 
-               /* bt_6lowpan_del_conn() is called with hci dev lock held which
-                * means that we must delete the netdevice in worker thread.
-                */
-               INIT_WORK(&entry->delete_netdev, delete_netdev);
-               schedule_work(&entry->delete_netdev);
+               ifdown(dev->netdev);
+
+               if (!removed) {
+                       INIT_WORK(&entry->delete_netdev, delete_netdev);
+                       schedule_work(&entry->delete_netdev);
+               }
        } else {
                write_unlock_irqrestore(&devices_lock, flags);
        }
 
+       return;
+}
+
+static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
+{
+       BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
+              state_to_string(state), err);
+}
+
+static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
+                                        unsigned long hdr_len,
+                                        unsigned long len, int nb)
+{
+       /* Note that we must allocate using GFP_ATOMIC here as
+        * this function is called originally from netdev hard xmit
+        * function in atomic context.
+        */
+       return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
+}
+
+static void chan_suspend_cb(struct l2cap_chan *chan)
+{
+       struct sk_buff *skb = chan->data;
+
+       BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+
+       lowpan_cb(skb)->status = -EAGAIN;
+}
+
+static void chan_resume_cb(struct l2cap_chan *chan)
+{
+       struct sk_buff *skb = chan->data;
+
+       BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+
+       lowpan_cb(skb)->status = 0;
+}
+
+static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+       return msecs_to_jiffies(1000);
+}
+
+static const struct l2cap_ops bt_6lowpan_chan_ops = {
+       .name                   = "L2CAP 6LoWPAN channel",
+       .new_connection         = chan_new_conn_cb,
+       .recv                   = chan_recv_cb,
+       .close                  = chan_close_cb,
+       .state_change           = chan_state_change_cb,
+       .ready                  = chan_ready_cb,
+       .resume                 = chan_resume_cb,
+       .suspend                = chan_suspend_cb,
+       .get_sndtimeo           = chan_get_sndtimeo_cb,
+       .alloc_skb              = chan_alloc_skb_cb,
+       .memcpy_fromiovec       = l2cap_chan_no_memcpy_fromiovec,
+
+       .teardown               = l2cap_chan_no_teardown,
+       .defer                  = l2cap_chan_no_defer,
+       .set_shutdown           = l2cap_chan_no_set_shutdown,
+};
+
+static inline __u8 bdaddr_type(__u8 type)
+{
+       if (type == ADDR_LE_DEV_PUBLIC)
+               return BDADDR_LE_PUBLIC;
+       else
+               return BDADDR_LE_RANDOM;
+}
+
+static struct l2cap_chan *chan_get(void)
+{
+       struct l2cap_chan *pchan;
+
+       pchan = chan_create();
+       if (!pchan)
+               return NULL;
+
+       pchan->ops = &bt_6lowpan_chan_ops;
+
+       return pchan;
+}
+
+static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
+{
+       struct l2cap_chan *pchan;
+       int err;
+
+       pchan = chan_get();
+       if (!pchan)
+               return -EINVAL;
+
+       err = l2cap_chan_connect(pchan, cpu_to_le16(psm_6lowpan), 0,
+                                addr, dst_type);
+
+       BT_DBG("chan %p err %d", pchan, err);
+       if (err < 0)
+               l2cap_chan_put(pchan);
+
        return err;
 }
 
+static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
+{
+       struct lowpan_peer *peer;
+
+       BT_DBG("conn %p dst type %d", conn, dst_type);
+
+       peer = lookup_peer(conn);
+       if (!peer)
+               return -ENOENT;
+
+       BT_DBG("peer %p chan %p", peer, peer->chan);
+
+       l2cap_chan_close(peer->chan, ENOENT);
+
+       return 0;
+}
+
+static struct l2cap_chan *bt_6lowpan_listen(void)
+{
+       bdaddr_t *addr = BDADDR_ANY;
+       struct l2cap_chan *pchan;
+       int err;
+
+       if (psm_6lowpan == 0)
+               return NULL;
+
+       pchan = chan_get();
+       if (!pchan)
+               return NULL;
+
+       pchan->state = BT_LISTEN;
+       pchan->src_type = BDADDR_LE_PUBLIC;
+
+       BT_DBG("psm 0x%04x chan %p src type %d", psm_6lowpan, pchan,
+              pchan->src_type);
+
+       err = l2cap_add_psm(pchan, addr, cpu_to_le16(psm_6lowpan));
+       if (err) {
+               l2cap_chan_put(pchan);
+               BT_ERR("psm cannot be added err %d", err);
+               return NULL;
+       }
+
+       return pchan;
+}
+
+static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
+                         struct l2cap_conn **conn)
+{
+       struct hci_conn *hcon;
+       struct hci_dev *hdev;
+       bdaddr_t *src = BDADDR_ANY;
+       int n;
+
+       n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
+                  &addr->b[5], &addr->b[4], &addr->b[3],
+                  &addr->b[2], &addr->b[1], &addr->b[0],
+                  addr_type);
+
+       if (n < 7)
+               return -EINVAL;
+
+       hdev = hci_get_route(addr, src);
+       if (!hdev)
+               return -ENOENT;
+
+       hci_dev_lock(hdev);
+       hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       hci_dev_unlock(hdev);
+
+       if (!hcon)
+               return -ENOENT;
+
+       *conn = (struct l2cap_conn *)hcon->l2cap_data;
+
+       BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);
+
+       return 0;
+}
+
+static void disconnect_all_peers(void)
+{
+       struct lowpan_dev *entry, *tmp_dev;
+       struct lowpan_peer *peer, *tmp_peer, *new_peer;
+       struct list_head peers;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&peers);
+
+       /* We make a separate list of peers as the close_cb() will
+        * modify the device peers list so it is better not to mess
+        * with the same list at the same time.
+        */
+
+       read_lock_irqsave(&devices_lock, flags);
+
+       list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
+               list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list) {
+                       new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
+                       if (!new_peer)
+                               break;
+
+                       new_peer->chan = peer->chan;
+                       INIT_LIST_HEAD(&new_peer->list);
+
+                       list_add(&new_peer->list, &peers);
+               }
+       }
+
+       read_unlock_irqrestore(&devices_lock, flags);
+
+       list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
+               l2cap_chan_close(peer->chan, ENOENT);
+               kfree(peer);
+       }
+}
+
+static int lowpan_psm_set(void *data, u64 val)
+{
+       u16 psm;
+
+       psm = val;
+       if (psm == 0 || psm_6lowpan != psm)
+               /* Disconnect existing connections if 6lowpan is
+                * disabled (psm = 0), or if psm changes.
+                */
+               disconnect_all_peers();
+
+       psm_6lowpan = psm;
+
+       if (listen_chan) {
+               l2cap_chan_close(listen_chan, 0);
+               l2cap_chan_put(listen_chan);
+       }
+
+       listen_chan = bt_6lowpan_listen();
+
+       return 0;
+}
+
+static int lowpan_psm_get(void *data, u64 *val)
+{
+       *val = psm_6lowpan;
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(lowpan_psm_fops, lowpan_psm_get,
+                       lowpan_psm_set, "%llu\n");
+
+static ssize_t lowpan_control_write(struct file *fp,
+                                   const char __user *user_buffer,
+                                   size_t count,
+                                   loff_t *position)
+{
+       char buf[32];
+       size_t buf_size = min(count, sizeof(buf) - 1);
+       int ret;
+       bdaddr_t addr;
+       u8 addr_type;
+       struct l2cap_conn *conn = NULL;
+
+       if (copy_from_user(buf, user_buffer, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+
+       if (memcmp(buf, "connect ", 8) == 0) {
+               ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
+               if (ret == -EINVAL)
+                       return ret;
+
+               if (listen_chan) {
+                       l2cap_chan_close(listen_chan, 0);
+                       l2cap_chan_put(listen_chan);
+                       listen_chan = NULL;
+               }
+
+               if (conn) {
+                       struct lowpan_peer *peer;
+
+                       if (!is_bt_6lowpan(conn->hcon))
+                               return -EINVAL;
+
+                       peer = lookup_peer(conn);
+                       if (peer) {
+                               BT_DBG("6LoWPAN connection already exists");
+                               return -EALREADY;
+                       }
+
+                       BT_DBG("conn %p dst %pMR type %d user %d", conn,
+                              &conn->hcon->dst, conn->hcon->dst_type,
+                              addr_type);
+               }
+
+               ret = bt_6lowpan_connect(&addr, addr_type);
+               if (ret < 0)
+                       return ret;
+
+               return count;
+       }
+
+       if (memcmp(buf, "disconnect ", 11) == 0) {
+               ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
+               if (ret < 0)
+                       return ret;
+
+               ret = bt_6lowpan_disconnect(conn, addr_type);
+               if (ret < 0)
+                       return ret;
+
+               return count;
+       }
+
+       return count;
+}
+
+static int lowpan_control_show(struct seq_file *f, void *ptr)
+{
+       struct lowpan_dev *entry, *tmp_dev;
+       struct lowpan_peer *peer, *tmp_peer;
+       unsigned long flags;
+
+       read_lock_irqsave(&devices_lock, flags);
+
+       list_for_each_entry_safe(entry, tmp_dev, &bt_6lowpan_devices, list) {
+               list_for_each_entry_safe(peer, tmp_peer, &entry->peers, list)
+                       seq_printf(f, "%pMR (type %u)\n",
+                                  &peer->chan->dst, peer->chan->dst_type);
+       }
+
+       read_unlock_irqrestore(&devices_lock, flags);
+
+       return 0;
+}
+
+static int lowpan_control_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, lowpan_control_show, inode->i_private);
+}
+
+static const struct file_operations lowpan_control_fops = {
+       .open           = lowpan_control_open,
+       .read           = seq_read,
+       .write          = lowpan_control_write,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void disconnect_devices(void)
+{
+       struct lowpan_dev *entry, *tmp, *new_dev;
+       struct list_head devices;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&devices);
+
+       /* We make a separate list of devices because the unregister_netdev()
+        * will call device_event() which will also want to modify the same
+        * devices list.
+        */
+
+       read_lock_irqsave(&devices_lock, flags);
+
+       list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
+               new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
+               if (!new_dev)
+                       break;
+
+               new_dev->netdev = entry->netdev;
+               INIT_LIST_HEAD(&new_dev->list);
+
+               list_add(&new_dev->list, &devices);
+       }
+
+       read_unlock_irqrestore(&devices_lock, flags);
+
+       list_for_each_entry_safe(entry, tmp, &devices, list) {
+               ifdown(entry->netdev);
+               BT_DBG("Unregistering netdev %s %p",
+                      entry->netdev->name, entry->netdev);
+               unregister_netdev(entry->netdev);
+               kfree(entry);
+       }
+}
+
 static int device_event(struct notifier_block *unused,
                        unsigned long event, void *ptr)
 {
@@ -838,6 +1252,8 @@ static int device_event(struct notifier_block *unused,
                list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
                                         list) {
                        if (entry->netdev == netdev) {
+                               BT_DBG("Unregistered netdev %s %p",
+                                      netdev->name, netdev);
                                list_del(&entry->list);
                                kfree(entry);
                                break;
@@ -854,12 +1270,37 @@ static struct notifier_block bt_6lowpan_dev_notifier = {
        .notifier_call = device_event,
 };
 
-int bt_6lowpan_init(void)
+static int __init bt_6lowpan_init(void)
 {
+       lowpan_psm_debugfs = debugfs_create_file("6lowpan_psm", 0644,
+                                                bt_debugfs, NULL,
+                                                &lowpan_psm_fops);
+       lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
+                                                    bt_debugfs, NULL,
+                                                    &lowpan_control_fops);
+
        return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
 }
 
-void bt_6lowpan_cleanup(void)
+static void __exit bt_6lowpan_exit(void)
 {
+       debugfs_remove(lowpan_psm_debugfs);
+       debugfs_remove(lowpan_control_debugfs);
+
+       if (listen_chan) {
+               l2cap_chan_close(listen_chan, 0);
+               l2cap_chan_put(listen_chan);
+       }
+
+       disconnect_devices();
+
        unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
 }
+
+module_init(bt_6lowpan_init);
+module_exit(bt_6lowpan_exit);
+
+MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
+MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/6lowpan.h b/net/bluetooth/6lowpan.h
deleted file mode 100644 (file)
index 5d281f1..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-   Copyright (c) 2013 Intel Corp.
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License version 2 and
-   only version 2 as published by the Free Software Foundation.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-*/
-
-#ifndef __6LOWPAN_H
-#define __6LOWPAN_H
-
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-#include <net/bluetooth/l2cap.h>
-
-#if IS_ENABLED(CONFIG_BT_6LOWPAN)
-int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
-int bt_6lowpan_add_conn(struct l2cap_conn *conn);
-int bt_6lowpan_del_conn(struct l2cap_conn *conn);
-int bt_6lowpan_init(void);
-void bt_6lowpan_cleanup(void);
-#else
-static int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
-{
-       return -EOPNOTSUPP;
-}
-static int bt_6lowpan_add_conn(struct l2cap_conn *conn)
-{
-       return -EOPNOTSUPP;
-}
-int bt_6lowpan_del_conn(struct l2cap_conn *conn)
-{
-       return -EOPNOTSUPP;
-}
-static int bt_6lowpan_init(void)
-{
-       return -EOPNOTSUPP;
-}
-static void bt_6lowpan_cleanup(void) { }
-#endif
-
-#endif /* __6LOWPAN_H */
index 06ec14499ca129d2e5b8833a01b7530bc829802d..f5afaa22f6ecda644d31551cb32d7a3758de31a3 100644 (file)
@@ -6,7 +6,6 @@ menuconfig BT
        tristate "Bluetooth subsystem support"
        depends on NET && !S390
        depends on RFKILL || !RFKILL
-       select 6LOWPAN_IPHC if BT_6LOWPAN
        select CRC16
        select CRYPTO
        select CRYPTO_BLKCIPHER
@@ -41,10 +40,11 @@ menuconfig BT
          more information, see <http://www.bluez.org/>.
 
 config BT_6LOWPAN
-       bool "Bluetooth 6LoWPAN support"
+       tristate "Bluetooth 6LoWPAN support"
        depends on BT && IPV6
+       select 6LOWPAN_IPHC if BT_6LOWPAN
        help
-         IPv6 compression over Bluetooth.
+         IPv6 compression over Bluetooth Low Energy.
 
 source "net/bluetooth/rfcomm/Kconfig"
 
index ca51246b1016f2496a53220256385f8072c5b360..886e9aa3ecf1ffa3d7cd93d7aea873bfb25b5c1e 100644 (file)
@@ -7,10 +7,12 @@ obj-$(CONFIG_BT_RFCOMM)       += rfcomm/
 obj-$(CONFIG_BT_BNEP)  += bnep/
 obj-$(CONFIG_BT_CMTP)  += cmtp/
 obj-$(CONFIG_BT_HIDP)  += hidp/
+obj-$(CONFIG_BT_6LOWPAN) += bluetooth_6lowpan.o
+
+bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
        a2mp.o amp.o
-bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
index 9514cc9e850ca9f4662cc0bedd898f07cc7f96f5..5dcade511fdbf7da80995ac2c8218171663b339f 100644 (file)
@@ -63,7 +63,7 @@ void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
        msg.msg_iov = (struct iovec *) &iv;
        msg.msg_iovlen = 1;
 
-       l2cap_chan_send(chan, &msg, total_len, 0);
+       l2cap_chan_send(chan, &msg, total_len);
 
        kfree(cmd);
 }
@@ -693,18 +693,19 @@ static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
 }
 
 static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+                                             unsigned long hdr_len,
                                              unsigned long len, int nb)
 {
        struct sk_buff *skb;
 
-       skb = bt_skb_alloc(len, GFP_KERNEL);
+       skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
        return skb;
 }
 
-static struct l2cap_ops a2mp_chan_ops = {
+static const struct l2cap_ops a2mp_chan_ops = {
        .name = "L2CAP A2MP channel",
        .recv = a2mp_chan_recv_cb,
        .close = a2mp_chan_close_cb,
@@ -719,6 +720,7 @@ static struct l2cap_ops a2mp_chan_ops = {
        .resume = l2cap_chan_no_resume,
        .set_shutdown = l2cap_chan_no_set_shutdown,
        .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
+       .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
 };
 
 static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
index 2021c481cdb657a8011938f5e8486c4c93f0c116..4dca0299ed96875b16a80929a98b3a6972003be4 100644 (file)
@@ -639,7 +639,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
-static struct seq_operations bt_seq_ops = {
+static const struct seq_operations bt_seq_ops = {
        .start = bt_seq_start,
        .next  = bt_seq_next,
        .stop  = bt_seq_stop,
index a841d3e776c5e091c19efea8423750edd018e946..85bcc21e84d2006c4839b2b7f409f2595cb41858 100644 (file)
@@ -538,8 +538,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        /* session struct allocated as private part of net_device */
        dev = alloc_netdev(sizeof(struct bnep_session),
-                               (*req->device) ? req->device : "bnep%d",
-                               bnep_net_setup);
+                          (*req->device) ? req->device : "bnep%d",
+                          NET_NAME_UNKNOWN,
+                          bnep_net_setup);
        if (!dev)
                return -ENOMEM;
 
index a7a27bc2c0b1d8a7200e0a627c69b329e08cf838..490ee8846d9e21e910220df687907243dd855feb 100644 (file)
@@ -67,7 +67,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
        conn->state = BT_CONNECT;
        conn->out = true;
 
-       conn->link_mode = HCI_LM_MASTER;
+       set_bit(HCI_CONN_MASTER, &conn->flags);
 
        conn->attempt++;
 
@@ -136,7 +136,7 @@ void hci_disconnect(struct hci_conn *conn, __u8 reason)
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
 }
 
-static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
+static void hci_amp_disconn(struct hci_conn *conn)
 {
        struct hci_cp_disconn_phy_link cp;
 
@@ -145,7 +145,7 @@ static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
        conn->state = BT_DISCONN;
 
        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
-       cp.reason = reason;
+       cp.reason = hci_proto_disconn_ind(conn);
        hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
                     sizeof(cp), &cp);
 }
@@ -213,14 +213,26 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
        return true;
 }
 
-void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
-                       u16 latency, u16 to_multiplier)
+u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
+                     u16 to_multiplier)
 {
-       struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;
+       struct hci_conn_params *params;
+       struct hci_cp_le_conn_update cp;
 
-       memset(&cp, 0, sizeof(cp));
+       hci_dev_lock(hdev);
 
+       params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+       if (params) {
+               params->conn_min_interval = min;
+               params->conn_max_interval = max;
+               params->conn_latency = latency;
+               params->supervision_timeout = to_multiplier;
+       }
+
+       hci_dev_unlock(hdev);
+
+       memset(&cp, 0, sizeof(cp));
        cp.handle               = cpu_to_le16(conn->handle);
        cp.conn_interval_min    = cpu_to_le16(min);
        cp.conn_interval_max    = cpu_to_le16(max);
@@ -230,6 +242,11 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
        cp.max_ce_len           = cpu_to_le16(0x0000);
 
        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
+
+       if (params)
+               return 0x01;
+
+       return 0x00;
 }
 
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -271,20 +288,6 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
        }
 }
 
-static void hci_conn_disconnect(struct hci_conn *conn)
-{
-       __u8 reason = hci_proto_disconn_ind(conn);
-
-       switch (conn->type) {
-       case AMP_LINK:
-               hci_amp_disconn(conn, reason);
-               break;
-       default:
-               hci_disconnect(conn, reason);
-               break;
-       }
-}
-
 static void hci_conn_timeout(struct work_struct *work)
 {
        struct hci_conn *conn = container_of(work, struct hci_conn,
@@ -319,7 +322,31 @@ static void hci_conn_timeout(struct work_struct *work)
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
-               hci_conn_disconnect(conn);
+               if (conn->type == AMP_LINK) {
+                       hci_amp_disconn(conn);
+               } else {
+                       __u8 reason = hci_proto_disconn_ind(conn);
+
+                       /* When we are master of an established connection
+                        * and it enters the disconnect timeout, then go
+                        * ahead and try to read the current clock offset.
+                        *
+                        * Processing of the result is done within the
+                        * event handling and hci_clock_offset_evt function.
+                        */
+                       if (conn->type == ACL_LINK &&
+                           test_bit(HCI_CONN_MASTER, &conn->flags)) {
+                               struct hci_dev *hdev = conn->hdev;
+                               struct hci_cp_read_clock_offset cp;
+
+                               cp.handle = cpu_to_le16(conn->handle);
+
+                               hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
+                                            sizeof(cp), &cp);
+                       }
+
+                       hci_disconnect(conn, reason);
+               }
                break;
        default:
                conn->state = BT_CLOSED;
@@ -336,9 +363,6 @@ static void hci_conn_idle(struct work_struct *work)
 
        BT_DBG("hcon %p mode %d", conn, conn->mode);
 
-       if (test_bit(HCI_RAW, &hdev->flags))
-               return;
-
        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;
 
@@ -529,7 +553,6 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 
        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
-                   test_bit(HCI_RAW, &d->flags) ||
                    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
                    d->dev_type != HCI_BREDR)
                        continue;
@@ -627,7 +650,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        cp.own_address_type = own_addr_type;
        cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
        cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
-       cp.supervision_timeout = cpu_to_le16(0x002a);
+       cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
+       cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
        cp.min_ce_len = cpu_to_le16(0x0000);
        cp.max_ce_len = cpu_to_le16(0x0000);
 
@@ -644,15 +668,12 @@ static void hci_req_directed_advertising(struct hci_request *req,
        u8 own_addr_type;
        u8 enable;
 
-       enable = 0x00;
-       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-
-       /* Clear the HCI_ADVERTISING bit temporarily so that the
+       /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
-       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
 
        /* Set require_privacy to false so that the remote device has a
         * chance of identifying us.
@@ -676,7 +697,8 @@ static void hci_req_directed_advertising(struct hci_request *req,
 }
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
-                               u8 dst_type, u8 sec_level, u8 auth_type)
+                               u8 dst_type, u8 sec_level, u16 conn_timeout,
+                               bool master)
 {
        struct hci_conn_params *params;
        struct hci_conn *conn;
@@ -696,7 +718,6 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
        if (conn) {
                conn->pending_sec_level = sec_level;
-               conn->auth_type = auth_type;
                goto done;
        }
 
@@ -733,25 +754,52 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        conn->dst_type = dst_type;
        conn->sec_level = BT_SECURITY_LOW;
        conn->pending_sec_level = sec_level;
-       conn->auth_type = auth_type;
+       conn->conn_timeout = conn_timeout;
 
        hci_req_init(&req, hdev);
 
-       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+       /* Disable advertising if we're active. For master role
+        * connections most controllers will refuse to connect if
+        * advertising is enabled, and for slave role connections we
+        * anyway have to disable it in order to start directed
+        * advertising.
+        */
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+               u8 enable = 0x00;
+               hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
+                           &enable);
+       }
+
+       /* If requested to connect as slave use directed advertising */
+       if (!master) {
+               /* If we're active scanning most controllers are unable
+                * to initiate advertising. Simply reject the attempt.
+                */
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+                   hdev->le_scan_type == LE_SCAN_ACTIVE) {
+                       skb_queue_purge(&req.cmd_q);
+                       hci_conn_del(conn);
+                       return ERR_PTR(-EBUSY);
+               }
+
                hci_req_directed_advertising(&req, conn);
                goto create_conn;
        }
 
        conn->out = true;
-       conn->link_mode |= HCI_LM_MASTER;
+       set_bit(HCI_CONN_MASTER, &conn->flags);
 
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
        if (params) {
                conn->le_conn_min_interval = params->conn_min_interval;
                conn->le_conn_max_interval = params->conn_max_interval;
+               conn->le_conn_latency = params->conn_latency;
+               conn->le_supv_timeout = params->supervision_timeout;
        } else {
                conn->le_conn_min_interval = hdev->le_conn_min_interval;
                conn->le_conn_max_interval = hdev->le_conn_max_interval;
+               conn->le_conn_latency = hdev->le_conn_latency;
+               conn->le_supv_timeout = hdev->le_supv_timeout;
        }
 
        /* If controller is scanning, we stop it since some controllers are
@@ -865,7 +913,8 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
                        return 0;
        }
 
-       if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
+       if (hci_conn_ssp_enabled(conn) &&
+           !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
                return 0;
 
        return 1;
@@ -881,7 +930,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 
        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
-       else if (conn->link_mode & HCI_LM_AUTH)
+       else if (test_bit(HCI_CONN_AUTH, &conn->flags))
                return 1;
 
        /* Make sure we preserve an existing MITM requirement*/
@@ -899,7 +948,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                /* If we're already encrypted set the REAUTH_PEND flag,
                 * otherwise set the ENCRYPT_PEND.
                 */
-               if (conn->link_mode & HCI_LM_ENCRYPT)
+               if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
                else
                        set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
@@ -940,7 +989,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                return 1;
 
        /* For other security levels we need the link key. */
-       if (!(conn->link_mode & HCI_LM_AUTH))
+       if (!test_bit(HCI_CONN_AUTH, &conn->flags))
                goto auth;
 
        /* An authenticated FIPS approved combination key has sufficient
@@ -980,7 +1029,7 @@ auth:
                return 0;
 
 encrypt:
-       if (conn->link_mode & HCI_LM_ENCRYPT)
+       if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
                return 1;
 
        hci_conn_encrypt(conn);
@@ -1027,7 +1076,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
 {
        BT_DBG("hcon %p", conn);
 
-       if (!role && conn->link_mode & HCI_LM_MASTER)
+       if (!role && test_bit(HCI_CONN_MASTER, &conn->flags))
                return 1;
 
        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
@@ -1048,9 +1097,6 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 
        BT_DBG("hcon %p mode %d", conn, conn->mode);
 
-       if (test_bit(HCI_RAW, &hdev->flags))
-               return;
-
        if (conn->mode != HCI_CM_SNIFF)
                goto timer;
 
@@ -1101,6 +1147,28 @@ void hci_conn_check_pending(struct hci_dev *hdev)
        hci_dev_unlock(hdev);
 }
 
+static u32 get_link_mode(struct hci_conn *conn)
+{
+       u32 link_mode = 0;
+
+       if (test_bit(HCI_CONN_MASTER, &conn->flags))
+               link_mode |= HCI_LM_MASTER;
+
+       if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
+               link_mode |= HCI_LM_ENCRYPT;
+
+       if (test_bit(HCI_CONN_AUTH, &conn->flags))
+               link_mode |= HCI_LM_AUTH;
+
+       if (test_bit(HCI_CONN_SECURE, &conn->flags))
+               link_mode |= HCI_LM_SECURE;
+
+       if (test_bit(HCI_CONN_FIPS, &conn->flags))
+               link_mode |= HCI_LM_FIPS;
+
+       return link_mode;
+}
+
 int hci_get_conn_list(void __user *arg)
 {
        struct hci_conn *c;
@@ -1136,7 +1204,7 @@ int hci_get_conn_list(void __user *arg)
                (ci + n)->type  = c->type;
                (ci + n)->out   = c->out;
                (ci + n)->state = c->state;
-               (ci + n)->link_mode = c->link_mode;
+               (ci + n)->link_mode = get_link_mode(c);
                if (++n >= req.conn_num)
                        break;
        }
@@ -1172,7 +1240,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
                ci.type  = conn->type;
                ci.out   = conn->out;
                ci.state = conn->state;
-               ci.link_mode = conn->link_mode;
+               ci.link_mode = get_link_mode(conn);
        }
        hci_dev_unlock(hdev);
 
index 0a43cce9a914b84613c7ee2d6fc30fdfdb2a0bc5..84431b86af963d423dd19d2ffcf957508aa3a1f0 100644 (file)
@@ -35,6 +35,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/mgmt.h>
 
 #include "smp.h"
 
@@ -68,7 +69,7 @@ static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
+       buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -94,7 +95,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
+       if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;
 
        hci_req_lock(hdev);
@@ -115,7 +116,7 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (err < 0)
                return err;
 
-       change_bit(HCI_DUT_MODE, &hdev->dev_flags);
+       change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
 
        return count;
 }
@@ -190,6 +191,31 @@ static const struct file_operations blacklist_fops = {
        .release        = single_release,
 };
 
+static int whitelist_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct bdaddr_list *b;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(b, &hdev->whitelist, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int whitelist_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, whitelist_show, inode->i_private);
+}
+
+static const struct file_operations whitelist_fops = {
+       .open           = whitelist_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int uuids_show(struct seq_file *f, void *p)
 {
        struct hci_dev *hdev = f->private;
@@ -352,62 +378,13 @@ static int auto_accept_delay_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
 
-static int ssp_debug_mode_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-       struct sk_buff *skb;
-       __u8 mode;
-       int err;
-
-       if (val != 0 && val != 1)
-               return -EINVAL;
-
-       if (!test_bit(HCI_UP, &hdev->flags))
-               return -ENETDOWN;
-
-       hci_req_lock(hdev);
-       mode = val;
-       skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
-                            &mode, HCI_CMD_TIMEOUT);
-       hci_req_unlock(hdev);
-
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       err = -bt_to_errno(skb->data[0]);
-       kfree_skb(skb);
-
-       if (err < 0)
-               return err;
-
-       hci_dev_lock(hdev);
-       hdev->ssp_debug_mode = val;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int ssp_debug_mode_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-       *val = hdev->ssp_debug_mode;
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
-                       ssp_debug_mode_set, "%llu\n");
-
 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
+       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -432,10 +409,10 @@ static ssize_t force_sc_support_write(struct file *file,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+       if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;
 
-       change_bit(HCI_FORCE_SC, &hdev->dev_flags);
+       change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
 
        return count;
 }
@@ -719,7 +696,7 @@ static ssize_t force_static_address_read(struct file *file,
        struct hci_dev *hdev = file->private_data;
        char buf[3];
 
-       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
+       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -744,10 +721,10 @@ static ssize_t force_static_address_write(struct file *file,
        if (strtobool(buf, &enable))
                return -EINVAL;
 
-       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
+       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;
 
-       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
+       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
 
        return count;
 }
@@ -900,177 +877,113 @@ static int conn_max_interval_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
 
-static int adv_channel_map_set(void *data, u64 val)
+static int conn_latency_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
 
-       if (val < 0x01 || val > 0x07)
+       if (val > 0x01f3)
                return -EINVAL;
 
        hci_dev_lock(hdev);
-       hdev->le_adv_channel_map = val;
+       hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-static int adv_channel_map_get(void *data, u64 *val)
+static int conn_latency_get(void *data, u64 *val)
 {
        struct hci_dev *hdev = data;
 
        hci_dev_lock(hdev);
-       *val = hdev->le_adv_channel_map;
+       *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
-                       adv_channel_map_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
+                       conn_latency_set, "%llu\n");
 
-static ssize_t lowpan_read(struct file *file, char __user *user_buf,
-                          size_t count, loff_t *ppos)
+static int supervision_timeout_set(void *data, u64 val)
 {
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
-                           size_t count, loff_t *position)
-{
-       struct hci_dev *hdev = fp->private_data;
-       bool enable;
-       char buf[32];
-       size_t buf_size = min(count, (sizeof(buf)-1));
-
-       if (copy_from_user(buf, user_buffer, buf_size))
-               return -EFAULT;
-
-       buf[buf_size] = '\0';
+       struct hci_dev *hdev = data;
 
-       if (strtobool(buf, &enable) < 0)
+       if (val < 0x000a || val > 0x0c80)
                return -EINVAL;
 
-       if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
-               return -EALREADY;
-
-       change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
+       hci_dev_lock(hdev);
+       hdev->le_supv_timeout = val;
+       hci_dev_unlock(hdev);
 
-       return count;
+       return 0;
 }
 
-static const struct file_operations lowpan_debugfs_fops = {
-       .open           = simple_open,
-       .read           = lowpan_read,
-       .write          = lowpan_write,
-       .llseek         = default_llseek,
-};
-
-static int le_auto_conn_show(struct seq_file *sf, void *ptr)
+static int supervision_timeout_get(void *data, u64 *val)
 {
-       struct hci_dev *hdev = sf->private;
-       struct hci_conn_params *p;
+       struct hci_dev *hdev = data;
 
        hci_dev_lock(hdev);
-
-       list_for_each_entry(p, &hdev->le_conn_params, list) {
-               seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
-                          p->auto_connect);
-       }
-
+       *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-static int le_auto_conn_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, le_auto_conn_show, inode->i_private);
-}
+DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
+                       supervision_timeout_set, "%llu\n");
 
-static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
-                                 size_t count, loff_t *offset)
+static int adv_channel_map_set(void *data, u64 val)
 {
-       struct seq_file *sf = file->private_data;
-       struct hci_dev *hdev = sf->private;
-       u8 auto_connect = 0;
-       bdaddr_t addr;
-       u8 addr_type;
-       char *buf;
-       int err = 0;
-       int n;
+       struct hci_dev *hdev = data;
 
-       /* Don't allow partial write */
-       if (*offset != 0)
+       if (val < 0x01 || val > 0x07)
                return -EINVAL;
 
-       if (count < 3)
-               return -EINVAL;
+       hci_dev_lock(hdev);
+       hdev->le_adv_channel_map = val;
+       hci_dev_unlock(hdev);
 
-       buf = memdup_user(data, count);
-       if (IS_ERR(buf))
-               return PTR_ERR(buf);
+       return 0;
+}
 
-       if (memcmp(buf, "add", 3) == 0) {
-               n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
-                          &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
-                          &addr.b[1], &addr.b[0], &addr_type,
-                          &auto_connect);
+static int adv_channel_map_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
 
-               if (n < 7) {
-                       err = -EINVAL;
-                       goto done;
-               }
+       hci_dev_lock(hdev);
+       *val = hdev->le_adv_channel_map;
+       hci_dev_unlock(hdev);
 
-               hci_dev_lock(hdev);
-               err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
-                                         hdev->le_conn_min_interval,
-                                         hdev->le_conn_max_interval);
-               hci_dev_unlock(hdev);
+       return 0;
+}
 
-               if (err)
-                       goto done;
-       } else if (memcmp(buf, "del", 3) == 0) {
-               n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
-                          &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
-                          &addr.b[1], &addr.b[0], &addr_type);
+DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
+                       adv_channel_map_set, "%llu\n");
 
-               if (n < 7) {
-                       err = -EINVAL;
-                       goto done;
-               }
+static int device_list_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct hci_conn_params *p;
 
-               hci_dev_lock(hdev);
-               hci_conn_params_del(hdev, &addr, addr_type);
-               hci_dev_unlock(hdev);
-       } else if (memcmp(buf, "clr", 3) == 0) {
-               hci_dev_lock(hdev);
-               hci_conn_params_clear(hdev);
-               hci_pend_le_conns_clear(hdev);
-               hci_update_background_scan(hdev);
-               hci_dev_unlock(hdev);
-       } else {
-               err = -EINVAL;
+       hci_dev_lock(hdev);
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
+                          p->auto_connect);
        }
+       hci_dev_unlock(hdev);
 
-done:
-       kfree(buf);
+       return 0;
+}
 
-       if (err)
-               return err;
-       else
-               return count;
+static int device_list_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, device_list_show, inode->i_private);
 }
 
-static const struct file_operations le_auto_conn_fops = {
-       .open           = le_auto_conn_open,
+static const struct file_operations device_list_fops = {
+       .open           = device_list_open,
        .read           = seq_read,
-       .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
 };
@@ -1549,13 +1462,6 @@ static void hci_setup_event_mask(struct hci_request *req)
                events[7] |= 0x20;      /* LE Meta-Event */
 
        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
-
-       if (lmp_le_capable(hdev)) {
-               memset(events, 0, sizeof(events));
-               events[0] = 0x1f;
-               hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
-                           sizeof(events), events);
-       }
 }
 
 static void hci_init2_req(struct hci_request *req, unsigned long opt)
@@ -1688,7 +1594,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
        }
 
        /* Enable Authenticated Payload Timeout Expired event if supported */
-       if (lmp_ping_capable(hdev))
+       if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;
 
        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
@@ -1725,8 +1631,25 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);
 
-       if (lmp_le_capable(hdev))
+       if (lmp_le_capable(hdev)) {
+               u8 events[8];
+
+               memset(events, 0, sizeof(events));
+               events[0] = 0x1f;
+
+               /* If controller supports the Connection Parameters Request
+                * Link Layer Procedure, enable the corresponding event.
+                */
+               if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
+                       events[0] |= 0x20;      /* LE Remote Connection
+                                                * Parameter Request
+                                                */
+
+               hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
+                           events);
+
                hci_set_le_support(req);
+       }
 
        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
@@ -1752,7 +1675,7 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
 
        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
-            test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
+            test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
@@ -1809,6 +1732,8 @@ static int __hci_init(struct hci_dev *hdev)
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
+       debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
+                           &whitelist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
 
        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
@@ -1830,8 +1755,6 @@ static int __hci_init(struct hci_dev *hdev)
        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
-               debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
-                                   hdev, &ssp_debug_mode_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
@@ -1879,12 +1802,14 @@ static int __hci_init(struct hci_dev *hdev)
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
+               debugfs_create_file("conn_latency", 0644, hdev->debugfs,
+                                   hdev, &conn_latency_fops);
+               debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
+                                   hdev, &supervision_timeout_fops);
                debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
                                    hdev, &adv_channel_map_fops);
-               debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
-                                   &lowpan_debugfs_fops);
-               debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
-                                   &le_auto_conn_fops);
+               debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
+                                   &device_list_fops);
                debugfs_create_u16("discov_interleaved_timeout", 0644,
                                   hdev->debugfs,
                                   &hdev->discov_interleaved_timeout);
@@ -1893,6 +1818,38 @@ static int __hci_init(struct hci_dev *hdev)
        return 0;
 }
 
+static void hci_init0_req(struct hci_request *req, unsigned long opt)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       BT_DBG("%s %ld", hdev->name, opt);
+
+       /* Reset */
+       if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+               hci_reset_req(req, 0);
+
+       /* Read Local Version */
+       hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
+
+       /* Read BD Address */
+       if (hdev->set_bdaddr)
+               hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
+}
+
+static int __hci_unconf_init(struct hci_dev *hdev)
+{
+       int err;
+
+       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+               return 0;
+
+       err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
 static void hci_scan_req(struct hci_request *req, unsigned long opt)
 {
        __u8 scan = opt;
@@ -1973,16 +1930,20 @@ bool hci_discovery_active(struct hci_dev *hdev)
 
 void hci_discovery_set_state(struct hci_dev *hdev, int state)
 {
+       int old_state = hdev->discovery.state;
+
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
 
-       if (hdev->discovery.state == state)
+       if (old_state == state)
                return;
 
+       hdev->discovery.state = state;
+
        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);
 
-               if (hdev->discovery.state != DISCOVERY_STARTING)
+               if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
@@ -1995,8 +1956,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
        case DISCOVERY_STOPPING:
                break;
        }
-
-       hdev->discovery.state = state;
 }
 
 void hci_inquiry_cache_flush(struct hci_dev *hdev)
@@ -2083,22 +2042,24 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
        list_add(&ie->list, pos);
 }
 
-bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
-                             bool name_known, bool *ssp)
+u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
+                            bool name_known)
 {
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
+       u32 flags = 0;
 
        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
 
        hci_remove_remote_oob_data(hdev, &data->bdaddr);
 
-       *ssp = data->ssp_mode;
+       if (!data->ssp_mode)
+               flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
 
        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
-               if (ie->data.ssp_mode)
-                       *ssp = true;
+               if (!ie->data.ssp_mode)
+                       flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
 
                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
@@ -2111,8 +2072,10 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
 
        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
-       if (!ie)
-               return false;
+       if (!ie) {
+               flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
+               goto done;
+       }
 
        list_add(&ie->all, &cache->all);
 
@@ -2135,9 +2098,10 @@ update:
        cache->timestamp = jiffies;
 
        if (ie->name_state == NAME_NOT_KNOWN)
-               return false;
+               flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
 
-       return true;
+done:
+       return flags;
 }
 
 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
@@ -2213,6 +2177,11 @@ int hci_inquiry(void __user *arg)
                goto done;
        }
 
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
@@ -2295,7 +2264,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                goto done;
        }
 
-       if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+       if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+           !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
@@ -2338,14 +2308,47 @@ static int hci_dev_do_open(struct hci_dev *hdev)
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
 
-       if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
-               ret = hdev->setup(hdev);
+       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               if (hdev->setup)
+                       ret = hdev->setup(hdev);
 
-       if (!ret) {
-               if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
-                       set_bit(HCI_RAW, &hdev->flags);
+               /* The transport driver can set these quirks before
+                * creating the HCI device or in its setup callback.
+                *
+                * In case any of them is set, the controller has to
+                * start up as unconfigured.
+                */
+               if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+                   test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
+                       set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
 
-               if (!test_bit(HCI_RAW, &hdev->flags) &&
+               /* For an unconfigured controller it is required to
+                * read at least the version information provided by
+                * the Read Local Version Information command.
+                *
+                * If the set_bdaddr driver callback is provided, then
+                * also the original Bluetooth public device address
+                * will be read using the Read BD Address command.
+                */
+               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+                       ret = __hci_unconf_init(hdev);
+       }
+
+       if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
+               /* If public address change is configured, ensure that
+                * the address gets programmed. If the driver does not
+                * support changing the public address, fail the power
+                * on procedure.
+                */
+               if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+                   hdev->set_bdaddr)
+                       ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
+               else
+                       ret = -EADDRNOTAVAIL;
+       }
+
+       if (!ret) {
+               if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }
@@ -2358,6 +2361,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+                   !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
+                   !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
@@ -2382,7 +2387,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                }
 
                hdev->close(hdev);
-               hdev->flags = 0;
+               hdev->flags &= BIT(HCI_RAW);
        }
 
 done:
@@ -2401,6 +2406,21 @@ int hci_dev_open(__u16 dev)
        if (!hdev)
                return -ENODEV;
 
+       /* Devices that are marked as unconfigured can only be powered
+        * up as user channel. Trying to bring them up as normal devices
+        * will result into a failure. Only user channel operation is
+        * possible.
+        *
+        * When this function is called for a user channel, the flag
+        * HCI_USER_CHANNEL will be set first before attempting to
+        * open the device.
+        */
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
+           !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
@@ -2417,11 +2437,22 @@ int hci_dev_open(__u16 dev)
 
        err = hci_dev_do_open(hdev);
 
+done:
        hci_dev_put(hdev);
-
        return err;
 }
 
+/* This function requires the caller holds hdev->lock */
+static void hci_pend_le_actions_clear(struct hci_dev *hdev)
+{
+       struct hci_conn_params *p;
+
+       list_for_each_entry(p, &hdev->le_conn_params, list)
+               list_del_init(&p->action);
+
+       BT_DBG("All LE pending actions cleared");
+}
+
 static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
@@ -2432,7 +2463,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hci_req_lock(hdev);
 
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
-               del_timer_sync(&hdev->cmd_timer);
+               cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }
@@ -2459,7 +2490,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
-       hci_pend_le_conns_clear(hdev);
+       hci_pend_le_actions_clear(hdev);
        hci_dev_unlock(hdev);
 
        hci_notify(hdev, HCI_DEV_DOWN);
@@ -2470,8 +2501,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
-       if (!test_bit(HCI_RAW, &hdev->flags) &&
-           !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+       if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
+           !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -2488,7 +2519,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        /* Drop last sent command */
        if (hdev->sent_cmd) {
-               del_timer_sync(&hdev->cmd_timer);
+               cancel_delayed_work_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }
@@ -2501,7 +2532,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hdev->close(hdev);
 
        /* Clear flags */
-       hdev->flags = 0;
+       hdev->flags &= BIT(HCI_RAW);
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
 
        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -2570,6 +2601,11 @@ int hci_dev_reset(__u16 dev)
                goto done;
        }
 
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               ret = -EOPNOTSUPP;
+               goto done;
+       }
+
        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
@@ -2585,8 +2621,7 @@ int hci_dev_reset(__u16 dev)
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
 
-       if (!test_bit(HCI_RAW, &hdev->flags))
-               ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+       ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 
 done:
        hci_req_unlock(hdev);
@@ -2608,6 +2643,11 @@ int hci_dev_reset_stat(__u16 dev)
                goto done;
        }
 
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               ret = -EOPNOTSUPP;
+               goto done;
+       }
+
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
 done:
@@ -2633,6 +2673,11 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
                goto done;
        }
 
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
@@ -2670,6 +2715,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
+
+               /* Ensure that the connectable state gets correctly
+                * notified if the whitelist is in use.
+                */
+               if (!err && !list_empty(&hdev->whitelist)) {
+                       bool changed;
+
+                       if ((dr.dev_opt & SCAN_PAGE))
+                               changed = !test_and_set_bit(HCI_CONNECTABLE,
+                                                           &hdev->dev_flags);
+                       else
+                               changed = test_and_set_bit(HCI_CONNECTABLE,
+                                                          &hdev->dev_flags);
+
+                       if (changed)
+                               mgmt_new_settings(hdev);
+               }
                break;
 
        case HCISETLINKPOL:
@@ -2815,7 +2877,8 @@ static int hci_rfkill_set_block(void *data, bool blocked)
 
        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
-               if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+               if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
+                   !test_bit(HCI_CONFIG, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
@@ -2846,6 +2909,7 @@ static void hci_power_on(struct work_struct *work)
         * valid, it is important to turn the device back off.
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
+           test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
            (hdev->dev_type == HCI_BREDR &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
@@ -2856,8 +2920,34 @@ static void hci_power_on(struct work_struct *work)
                                   HCI_AUTO_OFF_TIMEOUT);
        }
 
-       if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
+       if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
+               /* For unconfigured devices, set the HCI_RAW flag
+                * so that userspace can easily identify them.
+                */
+               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+                       set_bit(HCI_RAW, &hdev->flags);
+
+               /* For fully configured devices, this will send
+                * the Index Added event. For unconfigured devices,
+                * it will send Unconfigured Index Added event.
+                *
+                * Devices with HCI_QUIRK_RAW_DEVICE are ignored
+                * and no event will be sent.
+                */
                mgmt_index_added(hdev);
+       } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
+               /* When the controller is now configured, then it
+                * is important to clear the HCI_RAW flag.
+                */
+               if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+                       clear_bit(HCI_RAW, &hdev->flags);
+
+               /* Powering on the controller with HCI_CONFIG set only
+                * happens with the transition from unconfigured to
+                * configured. This will send the Index Added event.
+                */
+               mgmt_index_added(hdev);
+       }
 }
 
 static void hci_power_off(struct work_struct *work)
@@ -2974,10 +3064,7 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
 
 static bool ltk_type_master(u8 type)
 {
-       if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
-               return true;
-
-       return false;
+       return (type == SMP_LTK);
 }
 
 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
@@ -3049,12 +3136,12 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
        return NULL;
 }
 
-int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
-                    bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
+struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
+                                 bdaddr_t *bdaddr, u8 *val, u8 type,
+                                 u8 pin_len, bool *persistent)
 {
        struct link_key *key, *old_key;
        u8 old_key_type;
-       bool persistent;
 
        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
@@ -3064,7 +3151,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
-                       return -ENOMEM;
+                       return NULL;
                list_add(&key->list, &hdev->link_keys);
        }
 
@@ -3089,17 +3176,11 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
        else
                key->type = type;
 
-       if (!new_key)
-               return 0;
+       if (persistent)
+               *persistent = hci_persistent_key(hdev, conn, type,
+                                                old_key_type);
 
-       persistent = hci_persistent_key(hdev, conn, type, old_key_type);
-
-       mgmt_new_link_key(hdev, key, persistent);
-
-       if (conn)
-               conn->flush_key = !persistent;
-
-       return 0;
+       return key;
 }
 
 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3205,9 +3286,10 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
 }
 
 /* HCI command timer function */
-static void hci_cmd_timeout(unsigned long arg)
+static void hci_cmd_timeout(struct work_struct *work)
 {
-       struct hci_dev *hdev = (void *) arg;
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           cmd_timer.work);
 
        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
@@ -3313,12 +3395,12 @@ int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
        return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
                                         bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *b;
 
-       list_for_each_entry(b, &hdev->blacklist, list) {
+       list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
        }
@@ -3326,11 +3408,11 @@ struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
        return NULL;
 }
 
-static void hci_blacklist_clear(struct hci_dev *hdev)
+void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
 {
        struct list_head *p, *n;
 
-       list_for_each_safe(p, n, &hdev->blacklist) {
+       list_for_each_safe(p, n, bdaddr_list) {
                struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
 
                list_del(p);
@@ -3338,14 +3420,14 @@ static void hci_blacklist_clear(struct hci_dev *hdev)
        }
 }
 
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
        if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;
 
-       if (hci_blacklist_lookup(hdev, bdaddr, type))
+       if (hci_bdaddr_list_lookup(list, bdaddr, type))
                return -EEXIST;
 
        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -3355,82 +3437,21 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;
 
-       list_add(&entry->list, &hdev->blacklist);
+       list_add(&entry->list, list);
 
-       return mgmt_device_blocked(hdev, bdaddr, type);
+       return 0;
 }
 
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
        if (!bacmp(bdaddr, BDADDR_ANY)) {
-               hci_blacklist_clear(hdev);
+               hci_bdaddr_list_clear(list);
                return 0;
        }
 
-       entry = hci_blacklist_lookup(hdev, bdaddr, type);
-       if (!entry)
-               return -ENOENT;
-
-       list_del(&entry->list);
-       kfree(entry);
-
-       return mgmt_device_unblocked(hdev, bdaddr, type);
-}
-
-struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
-                                         bdaddr_t *bdaddr, u8 type)
-{
-       struct bdaddr_list *b;
-
-       list_for_each_entry(b, &hdev->le_white_list, list) {
-               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
-                       return b;
-       }
-
-       return NULL;
-}
-
-void hci_white_list_clear(struct hci_dev *hdev)
-{
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &hdev->le_white_list) {
-               struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
-
-               list_del(p);
-               kfree(b);
-       }
-}
-
-int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-       struct bdaddr_list *entry;
-
-       if (!bacmp(bdaddr, BDADDR_ANY))
-               return -EBADF;
-
-       entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
-
-       bacpy(&entry->bdaddr, bdaddr);
-       entry->bdaddr_type = type;
-
-       list_add(&entry->list, &hdev->le_white_list);
-
-       return 0;
-}
-
-int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-       struct bdaddr_list *entry;
-
-       if (!bacmp(bdaddr, BDADDR_ANY))
-               return -EBADF;
-
-       entry = hci_white_list_lookup(hdev, bdaddr, type);
+       entry = hci_bdaddr_list_lookup(list, bdaddr, type);
        if (!entry)
                return -ENOENT;
 
@@ -3446,6 +3467,10 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
 {
        struct hci_conn_params *params;
 
+       /* The conn params list only contains identity addresses */
+       if (!hci_is_identity_address(addr, addr_type))
+               return NULL;
+
        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
@@ -3473,62 +3498,97 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
        return true;
 }
 
-static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
+                                                 bdaddr_t *addr, u8 addr_type)
 {
-       if (addr_type == ADDR_LE_DEV_PUBLIC)
-               return true;
+       struct hci_conn_params *param;
 
-       /* Check for Random Static address type */
-       if ((addr->b[5] & 0xc0) == 0xc0)
-               return true;
+       /* The list only contains identity addresses */
+       if (!hci_is_identity_address(addr, addr_type))
+               return NULL;
 
-       return false;
+       list_for_each_entry(param, list, action) {
+               if (bacmp(&param->addr, addr) == 0 &&
+                   param->addr_type == addr_type)
+                       return param;
+       }
+
+       return NULL;
 }
 
 /* This function requires the caller holds hdev->lock */
-int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
-                       u8 auto_connect, u16 conn_min_interval,
-                       u16 conn_max_interval)
+struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
+                                           bdaddr_t *addr, u8 addr_type)
 {
        struct hci_conn_params *params;
 
-       if (!is_identity_address(addr, addr_type))
-               return -EINVAL;
+       if (!hci_is_identity_address(addr, addr_type))
+               return NULL;
 
        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
-               goto update;
+               return params;
 
        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params) {
                BT_ERR("Out of memory");
-               return -ENOMEM;
+               return NULL;
        }
 
        bacpy(&params->addr, addr);
        params->addr_type = addr_type;
 
        list_add(&params->list, &hdev->le_conn_params);
+       INIT_LIST_HEAD(&params->action);
 
-update:
-       params->conn_min_interval = conn_min_interval;
-       params->conn_max_interval = conn_max_interval;
-       params->auto_connect = auto_connect;
+       params->conn_min_interval = hdev->le_conn_min_interval;
+       params->conn_max_interval = hdev->le_conn_max_interval;
+       params->conn_latency = hdev->le_conn_latency;
+       params->supervision_timeout = hdev->le_supv_timeout;
+       params->auto_connect = HCI_AUTO_CONN_DISABLED;
+
+       BT_DBG("addr %pMR (type %u)", addr, addr_type);
+
+       return params;
+}
+
+/* This function requires the caller holds hdev->lock */
+int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+                       u8 auto_connect)
+{
+       struct hci_conn_params *params;
+
+       params = hci_conn_params_add(hdev, addr, addr_type);
+       if (!params)
+               return -EIO;
+
+       if (params->auto_connect == auto_connect)
+               return 0;
+
+       list_del_init(&params->action);
 
        switch (auto_connect) {
        case HCI_AUTO_CONN_DISABLED:
        case HCI_AUTO_CONN_LINK_LOSS:
-               hci_pend_le_conn_del(hdev, addr, addr_type);
+               hci_update_background_scan(hdev);
+               break;
+       case HCI_AUTO_CONN_REPORT:
+               list_add(&params->action, &hdev->pend_le_reports);
+               hci_update_background_scan(hdev);
                break;
        case HCI_AUTO_CONN_ALWAYS:
-               if (!is_connected(hdev, addr, addr_type))
-                       hci_pend_le_conn_add(hdev, addr, addr_type);
+               if (!is_connected(hdev, addr, addr_type)) {
+                       list_add(&params->action, &hdev->pend_le_conns);
+                       hci_update_background_scan(hdev);
+               }
                break;
        }
 
-       BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
-              "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
-              conn_min_interval, conn_max_interval);
+       params->auto_connect = auto_connect;
+
+       BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+              auto_connect);
 
        return 0;
 }
@@ -3542,97 +3602,44 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
        if (!params)
                return;
 
-       hci_pend_le_conn_del(hdev, addr, addr_type);
-
+       list_del(&params->action);
        list_del(&params->list);
        kfree(params);
 
+       hci_update_background_scan(hdev);
+
        BT_DBG("addr %pMR (type %u)", addr, addr_type);
 }
 
 /* This function requires the caller holds hdev->lock */
-void hci_conn_params_clear(struct hci_dev *hdev)
+void hci_conn_params_clear_disabled(struct hci_dev *hdev)
 {
        struct hci_conn_params *params, *tmp;
 
        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+               if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
+                       continue;
                list_del(&params->list);
                kfree(params);
        }
 
-       BT_DBG("All LE connection parameters were removed");
+       BT_DBG("All LE disabled connection parameters were removed");
 }
 
 /* This function requires the caller holds hdev->lock */
-struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
-                                           bdaddr_t *addr, u8 addr_type)
+void hci_conn_params_clear_all(struct hci_dev *hdev)
 {
-       struct bdaddr_list *entry;
-
-       list_for_each_entry(entry, &hdev->pend_le_conns, list) {
-               if (bacmp(&entry->bdaddr, addr) == 0 &&
-                   entry->bdaddr_type == addr_type)
-                       return entry;
-       }
-
-       return NULL;
-}
-
-/* This function requires the caller holds hdev->lock */
-void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
-{
-       struct bdaddr_list *entry;
-
-       entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
-       if (entry)
-               goto done;
+       struct hci_conn_params *params, *tmp;
 
-       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry) {
-               BT_ERR("Out of memory");
-               return;
+       list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+               list_del(&params->action);
+               list_del(&params->list);
+               kfree(params);
        }
 
-       bacpy(&entry->bdaddr, addr);
-       entry->bdaddr_type = addr_type;
-
-       list_add(&entry->list, &hdev->pend_le_conns);
-
-       BT_DBG("addr %pMR (type %u)", addr, addr_type);
-
-done:
        hci_update_background_scan(hdev);
-}
 
-/* This function requires the caller holds hdev->lock */
-void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
-{
-       struct bdaddr_list *entry;
-
-       entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
-       if (!entry)
-               goto done;
-
-       list_del(&entry->list);
-       kfree(entry);
-
-       BT_DBG("addr %pMR (type %u)", addr, addr_type);
-
-done:
-       hci_update_background_scan(hdev);
-}
-
-/* This function requires the caller holds hdev->lock */
-void hci_pend_le_conns_clear(struct hci_dev *hdev)
-{
-       struct bdaddr_list *entry, *tmp;
-
-       list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-
-       BT_DBG("All LE pending connections cleared");
+       BT_DBG("All LE connection parameters were removed");
 }
 
 static void inquiry_complete(struct hci_dev *hdev, u8 status)
@@ -3722,7 +3729,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
-       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
            hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
                BT_DBG("Deferring random address update");
                return;
@@ -3784,7 +3791,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
         * the HCI command if the current random address is already the
         * static one.
         */
-       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
@@ -3813,7 +3820,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
 {
-       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
@@ -3837,6 +3844,7 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
+       hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
@@ -3848,6 +3856,8 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
+       hdev->le_conn_latency = 0x0000;
+       hdev->le_supv_timeout = 0x002a;
 
        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -3859,6 +3869,7 @@ struct hci_dev *hci_alloc_dev(void)
 
        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
+       INIT_LIST_HEAD(&hdev->whitelist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
@@ -3867,6 +3878,7 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_LIST_HEAD(&hdev->le_white_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
+       INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);
 
        INIT_WORK(&hdev->rx_work, hci_rx_work);
@@ -3884,7 +3896,7 @@ struct hci_dev *hci_alloc_dev(void)
 
        init_waitqueue_head(&hdev->req_wait_q);
 
-       setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
+       INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
 
        hci_init_sysfs(hdev);
        discovery_init(hdev);
@@ -3906,7 +3918,7 @@ int hci_register_dev(struct hci_dev *hdev)
 {
        int id, error;
 
-       if (!hdev->open || !hdev->close)
+       if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;
 
        /* Do not allow HCI_AMP devices to register at index 0,
@@ -3991,6 +4003,12 @@ int hci_register_dev(struct hci_dev *hdev)
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);
 
+       /* Devices that are marked for raw-only usage are unconfigured
+        * and should not be included in normal operation.
+        */
+       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+               set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
 
@@ -4033,7 +4051,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
        cancel_work_sync(&hdev->power_on);
 
        if (!test_bit(HCI_INIT, &hdev->flags) &&
-           !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+           !test_bit(HCI_SETUP, &hdev->dev_flags) &&
+           !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
@@ -4061,15 +4080,15 @@ void hci_unregister_dev(struct hci_dev *hdev)
        destroy_workqueue(hdev->req_workqueue);
 
        hci_dev_lock(hdev);
-       hci_blacklist_clear(hdev);
+       hci_bdaddr_list_clear(&hdev->blacklist);
+       hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
-       hci_white_list_clear(hdev);
-       hci_conn_params_clear(hdev);
-       hci_pend_le_conns_clear(hdev);
+       hci_bdaddr_list_clear(&hdev->le_white_list);
+       hci_conn_params_clear_all(hdev);
        hci_dev_unlock(hdev);
 
        hci_dev_put(hdev);
@@ -4307,6 +4326,8 @@ EXPORT_SYMBOL(hci_unregister_cb);
 
 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
+       int err;
+
        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
        /* Time stamp */
@@ -4323,8 +4344,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);
 
-       if (hdev->send(hdev, skb) < 0)
-               BT_ERR("%s sending frame failed", hdev->name);
+       err = hdev->send(hdev, skb);
+       if (err < 0) {
+               BT_ERR("%s sending frame failed (%d)", hdev->name, err);
+               kfree_skb(skb);
+       }
 }
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -4798,7 +4822,7 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
 {
-       if (!test_bit(HCI_RAW, &hdev->flags)) {
+       if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
@@ -4981,7 +5005,7 @@ static void hci_sched_le(struct hci_dev *hdev)
        if (!hci_conn_num(hdev, LE_LINK))
                return;
 
-       if (!test_bit(HCI_RAW, &hdev->flags)) {
+       if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
@@ -5226,8 +5250,7 @@ static void hci_rx_work(struct work_struct *work)
                        hci_send_to_sock(hdev, skb);
                }
 
-               if (test_bit(HCI_RAW, &hdev->flags) ||
-                   test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }
@@ -5287,10 +5310,10 @@ static void hci_cmd_work(struct work_struct *work)
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
-                               del_timer(&hdev->cmd_timer);
+                               cancel_delayed_work(&hdev->cmd_timer);
                        else
-                               mod_timer(&hdev->cmd_timer,
-                                         jiffies + HCI_CMD_TIMEOUT);
+                               schedule_delayed_work(&hdev->cmd_timer,
+                                                     HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -5314,12 +5337,13 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
 
-       /* Set require_privacy to true to avoid identification from
-        * unknown peer devices. Since this is passive scanning, no
-        * SCAN_REQ using the local identity should be sent. Mandating
-        * privacy is just an extra precaution.
+       /* Set require_privacy to false since no SCAN_REQ are send
+        * during passive scanning. Not using an unresolvable address
+        * here is important so that peer devices using direct
+        * advertising with our address will be correctly reported
+        * by the controller.
         */
-       if (hci_update_random_address(req, true, &own_addr_type))
+       if (hci_update_random_address(req, false, &own_addr_type))
                return;
 
        memset(&param_cp, 0, sizeof(param_cp));
@@ -5356,11 +5380,30 @@ void hci_update_background_scan(struct hci_dev *hdev)
        struct hci_conn *conn;
        int err;
 
+       if (!test_bit(HCI_UP, &hdev->flags) ||
+           test_bit(HCI_INIT, &hdev->flags) ||
+           test_bit(HCI_SETUP, &hdev->dev_flags) ||
+           test_bit(HCI_CONFIG, &hdev->dev_flags) ||
+           test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
+           test_bit(HCI_UNREGISTER, &hdev->dev_flags))
+               return;
+
+       /* No point in doing scanning if LE support hasn't been enabled */
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return;
+
+       /* If discovery is active don't interfere with it */
+       if (hdev->discovery.state != DISCOVERY_STOPPED)
+               return;
+
        hci_req_init(&req, hdev);
 
-       if (list_empty(&hdev->pend_le_conns)) {
-               /* If there is no pending LE connections, we should stop
-                * the background scanning.
+       if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+           list_empty(&hdev->pend_le_conns) &&
+           list_empty(&hdev->pend_le_reports)) {
+               /* If there is no pending LE connections or devices
+                * to be scanned for, we should stop the background
+                * scanning.
                 */
 
                /* If controller is not scanning we are done. */
index 640c54ec1bd29038101a06e6c1e3098e02336a8d..c8ae9ee3cb12bfcd3f08c980283e2158db29eba0 100644 (file)
@@ -32,6 +32,7 @@
 
 #include "a2mp.h"
 #include "amp.h"
+#include "smp.h"
 
 /* Handle HCI Event packets */
 
@@ -102,9 +103,9 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn) {
                if (rp->role)
-                       conn->link_mode &= ~HCI_LM_MASTER;
+                       clear_bit(HCI_CONN_MASTER, &conn->flags);
                else
-                       conn->link_mode |= HCI_LM_MASTER;
+                       set_bit(HCI_CONN_MASTER, &conn->flags);
        }
 
        hci_dev_unlock(hdev);
@@ -174,12 +175,14 @@ static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
        if (!sent)
                return;
 
-       if (!status)
-               hdev->link_policy = get_unaligned_le16(sent);
+       hdev->link_policy = get_unaligned_le16(sent);
 }
 
 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
@@ -269,27 +272,30 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
+       __u8 param;
        void *sent;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
        if (!sent)
                return;
 
-       if (!status) {
-               __u8 param = *((__u8 *) sent);
+       param = *((__u8 *) sent);
 
-               if (param)
-                       set_bit(HCI_ENCRYPT, &hdev->flags);
-               else
-                       clear_bit(HCI_ENCRYPT, &hdev->flags);
-       }
+       if (param)
+               set_bit(HCI_ENCRYPT, &hdev->flags);
+       else
+               clear_bit(HCI_ENCRYPT, &hdev->flags);
 }
 
 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       __u8 param, status = *((__u8 *) skb->data);
+       __u8 status = *((__u8 *) skb->data);
+       __u8 param;
        int old_pscan, old_iscan;
        void *sent;
 
@@ -601,8 +607,10 @@ static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
-               hdev->flow_ctl_mode = rp->mode;
+       if (rp->status)
+               return;
+
+       hdev->flow_ctl_mode = rp->mode;
 }
 
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
@@ -637,8 +645,14 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
+       if (rp->status)
+               return;
+
+       if (test_bit(HCI_INIT, &hdev->flags))
                bacpy(&hdev->bdaddr, &rp->bdaddr);
+
+       if (test_bit(HCI_SETUP, &hdev->dev_flags))
+               bacpy(&hdev->setup_addr, &rp->bdaddr);
 }
 
 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
@@ -648,7 +662,10 @@ static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
+       if (rp->status)
+               return;
+
+       if (test_bit(HCI_INIT, &hdev->flags)) {
                hdev->page_scan_interval = __le16_to_cpu(rp->interval);
                hdev->page_scan_window = __le16_to_cpu(rp->window);
        }
@@ -680,7 +697,10 @@ static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
+       if (rp->status)
+               return;
+
+       if (test_bit(HCI_INIT, &hdev->flags))
                hdev->page_scan_type = rp->type;
 }
 
@@ -720,6 +740,41 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
               hdev->block_cnt, hdev->block_len);
 }
 
+static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_rp_read_clock *rp = (void *) skb->data;
+       struct hci_cp_read_clock *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s", hdev->name);
+
+       if (skb->len < sizeof(*rp))
+               return;
+
+       if (rp->status)
+               return;
+
+       hci_dev_lock(hdev);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
+       if (!cp)
+               goto unlock;
+
+       if (cp->which == 0x00) {
+               hdev->clock = le32_to_cpu(rp->clock);
+               goto unlock;
+       }
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
+       if (conn) {
+               conn->clock = le32_to_cpu(rp->clock);
+               conn->clock_accuracy = le16_to_cpu(rp->accuracy);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
                                       struct sk_buff *skb)
 {
@@ -789,8 +844,10 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
-               hdev->inq_tx_power = rp->tx_power;
+       if (rp->status)
+               return;
+
+       hdev->inq_tx_power = rp->tx_power;
 }
 
 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -861,8 +918,10 @@ static void hci_cc_le_read_local_features(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
-               memcpy(hdev->le_features, rp->features, 8);
+       if (rp->status)
+               return;
+
+       memcpy(hdev->le_features, rp->features, 8);
 }
 
 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
@@ -872,8 +931,10 @@ static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
-               hdev->adv_tx_power = rp->tx_power;
+       if (rp->status)
+               return;
+
+       hdev->adv_tx_power = rp->tx_power;
 }
 
 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -973,14 +1034,16 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
        if (!sent)
                return;
 
        hci_dev_lock(hdev);
 
-       if (!status)
-               bacpy(&hdev->random_addr, sent);
+       bacpy(&hdev->random_addr, sent);
 
        hci_dev_unlock(hdev);
 }
@@ -991,11 +1054,11 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
-       if (!sent)
+       if (status)
                return;
 
-       if (status)
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
+       if (!sent)
                return;
 
        hci_dev_lock(hdev);
@@ -1006,15 +1069,17 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
        if (*sent) {
                struct hci_conn *conn;
 
+               set_bit(HCI_LE_ADV, &hdev->dev_flags);
+
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
-                                          HCI_LE_CONN_TIMEOUT);
+                                          conn->conn_timeout);
+       } else {
+               clear_bit(HCI_LE_ADV, &hdev->dev_flags);
        }
 
-       mgmt_advertising(hdev, *sent);
-
        hci_dev_unlock(hdev);
 }
 
@@ -1025,14 +1090,16 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
        if (!cp)
                return;
 
        hci_dev_lock(hdev);
 
-       if (!status)
-               hdev->le_scan_type = cp->type;
+       hdev->le_scan_type = cp->type;
 
        hci_dev_unlock(hdev);
 }
@@ -1053,13 +1120,15 @@ static void clear_pending_adv_report(struct hci_dev *hdev)
 }
 
 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                    u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
+                                    u8 bdaddr_type, s8 rssi, u32 flags,
+                                    u8 *data, u8 len)
 {
        struct discovery_state *d = &hdev->discovery;
 
        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
+       d->last_adv_flags = flags;
        memcpy(d->last_adv_data, data, len);
        d->last_adv_data_len = len;
 }
@@ -1072,11 +1141,11 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
-       if (!cp)
+       if (status)
                return;
 
-       if (status)
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+       if (!cp)
                return;
 
        switch (cp->enable) {
@@ -1096,7 +1165,7 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
 
                        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                                          d->last_adv_addr_type, NULL,
-                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_rssi, d->last_adv_flags,
                                          d->last_adv_data,
                                          d->last_adv_data_len, NULL, 0);
                }
@@ -1107,13 +1176,21 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                cancel_delayed_work(&hdev->le_scan_disable);
 
                clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+
                /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
                 * interrupted scanning due to a connect request. Mark
-                * therefore discovery as stopped.
+                * therefore discovery as stopped. If this was not
+                * because of a connect request advertising might have
+                * been disabled because of active scanning, so
+                * re-enable it again if necessary.
                 */
                if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
                                       &hdev->dev_flags))
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
+                        hdev->discovery.state == DISCOVERY_FINDING)
+                       mgmt_reenable_advertising(hdev);
+
                break;
 
        default:
@@ -1129,8 +1206,10 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
 
-       if (!rp->status)
-               hdev->le_white_list_size = rp->size;
+       if (rp->status)
+               return;
+
+       hdev->le_white_list_size = rp->size;
 }
 
 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
@@ -1140,8 +1219,10 @@ static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       if (!status)
-               hci_white_list_clear(hdev);
+       if (status)
+               return;
+
+       hci_bdaddr_list_clear(&hdev->le_white_list);
 }
 
 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
@@ -1152,12 +1233,15 @@ static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
        if (!sent)
                return;
 
-       if (!status)
-               hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
+       hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
+                          sent->bdaddr_type);
 }
 
 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
@@ -1168,12 +1252,15 @@ static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
        if (!sent)
                return;
 
-       if (!status)
-               hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
+       hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
+                           sent->bdaddr_type);
 }
 
 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
@@ -1183,8 +1270,10 @@ static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
-       if (!rp->status)
-               memcpy(hdev->le_states, rp->le_states, 8);
+       if (rp->status)
+               return;
+
+       memcpy(hdev->le_states, rp->le_states, 8);
 }
 
 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
@@ -1195,25 +1284,26 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
+       if (status)
+               return;
+
        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
        if (!sent)
                return;
 
-       if (!status) {
-               if (sent->le) {
-                       hdev->features[1][0] |= LMP_HOST_LE;
-                       set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
-               } else {
-                       hdev->features[1][0] &= ~LMP_HOST_LE;
-                       clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
-                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
-               }
-
-               if (sent->simul)
-                       hdev->features[1][0] |= LMP_HOST_LE_BREDR;
-               else
-                       hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
+       if (sent->le) {
+               hdev->features[1][0] |= LMP_HOST_LE;
+               set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+       } else {
+               hdev->features[1][0] &= ~LMP_HOST_LE;
+               clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
        }
+
+       if (sent->simul)
+               hdev->features[1][0] |= LMP_HOST_LE_BREDR;
+       else
+               hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
 }
 
 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1345,7 +1435,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
                        conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
                        if (conn) {
                                conn->out = true;
-                               conn->link_mode |= HCI_LM_MASTER;
+                               set_bit(HCI_CONN_MASTER, &conn->flags);
                        } else
                                BT_ERR("No memory for new connection");
                }
@@ -1835,7 +1925,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
        if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
                queue_delayed_work(conn->hdev->workqueue,
                                   &conn->le_conn_timeout,
-                                  HCI_LE_CONN_TIMEOUT);
+                                  conn->conn_timeout);
 
 unlock:
        hci_dev_unlock(hdev);
@@ -1929,7 +2019,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_lock(hdev);
 
        for (; num_rsp; num_rsp--, info++) {
-               bool name_known, ssp;
+               u32 flags;
 
                bacpy(&data.bdaddr, &info->bdaddr);
                data.pscan_rep_mode     = info->pscan_rep_mode;
@@ -1940,10 +2030,10 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
                data.rssi               = 0x00;
                data.ssp_mode           = 0x00;
 
-               name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
+               flags = hci_inquiry_cache_update(hdev, &data, false);
+
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
-                                 info->dev_class, 0, !name_known, ssp, NULL,
-                                 0, NULL, 0);
+                                 info->dev_class, 0, flags, NULL, 0, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -1988,10 +2078,10 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_conn_add_sysfs(conn);
 
                if (test_bit(HCI_AUTH, &hdev->flags))
-                       conn->link_mode |= HCI_LM_AUTH;
+                       set_bit(HCI_CONN_AUTH, &conn->flags);
 
                if (test_bit(HCI_ENCRYPT, &hdev->flags))
-                       conn->link_mode |= HCI_LM_ENCRYPT;
+                       set_bit(HCI_CONN_ENCRYPT, &conn->flags);
 
                /* Get remote features */
                if (conn->type == ACL_LINK) {
@@ -2031,10 +2121,21 @@ unlock:
        hci_conn_check_pending(hdev);
 }
 
+static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct hci_cp_reject_conn_req cp;
+
+       bacpy(&cp.bdaddr, bdaddr);
+       cp.reason = HCI_ERROR_REJ_BAD_ADDR;
+       hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
+}
+
 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_conn_request *ev = (void *) skb->data;
        int mask = hdev->link_mode;
+       struct inquiry_entry *ie;
+       struct hci_conn *conn;
        __u8 flags = 0;
 
        BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
@@ -2043,73 +2144,79 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
                                      &flags);
 
-       if ((mask & HCI_LM_ACCEPT) &&
-           !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
-               /* Connection accepted */
-               struct inquiry_entry *ie;
-               struct hci_conn *conn;
+       if (!(mask & HCI_LM_ACCEPT)) {
+               hci_reject_conn(hdev, &ev->bdaddr);
+               return;
+       }
 
-               hci_dev_lock(hdev);
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
+               if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
+                                          BDADDR_BREDR)) {
+                       hci_reject_conn(hdev, &ev->bdaddr);
+                       return;
+               }
+       } else {
+               if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
+                                           BDADDR_BREDR)) {
+                       hci_reject_conn(hdev, &ev->bdaddr);
+                       return;
+               }
+       }
 
-               ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
-               if (ie)
-                       memcpy(ie->data.dev_class, ev->dev_class, 3);
+       /* Connection accepted */
 
-               conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
-                                              &ev->bdaddr);
+       hci_dev_lock(hdev);
+
+       ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+       if (ie)
+               memcpy(ie->data.dev_class, ev->dev_class, 3);
+
+       conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+                       &ev->bdaddr);
+       if (!conn) {
+               conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
                if (!conn) {
-                       conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
-                       if (!conn) {
-                               BT_ERR("No memory for new connection");
-                               hci_dev_unlock(hdev);
-                               return;
-                       }
+                       BT_ERR("No memory for new connection");
+                       hci_dev_unlock(hdev);
+                       return;
                }
+       }
 
-               memcpy(conn->dev_class, ev->dev_class, 3);
+       memcpy(conn->dev_class, ev->dev_class, 3);
 
-               hci_dev_unlock(hdev);
+       hci_dev_unlock(hdev);
 
-               if (ev->link_type == ACL_LINK ||
-                   (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
-                       struct hci_cp_accept_conn_req cp;
-                       conn->state = BT_CONNECT;
+       if (ev->link_type == ACL_LINK ||
+           (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
+               struct hci_cp_accept_conn_req cp;
+               conn->state = BT_CONNECT;
 
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
+               bacpy(&cp.bdaddr, &ev->bdaddr);
 
-                       if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
-                               cp.role = 0x00; /* Become master */
-                       else
-                               cp.role = 0x01; /* Remain slave */
+               if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
+                       cp.role = 0x00; /* Become master */
+               else
+                       cp.role = 0x01; /* Remain slave */
 
-                       hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
-                                    &cp);
-               } else if (!(flags & HCI_PROTO_DEFER)) {
-                       struct hci_cp_accept_sync_conn_req cp;
-                       conn->state = BT_CONNECT;
+               hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
+       } else if (!(flags & HCI_PROTO_DEFER)) {
+               struct hci_cp_accept_sync_conn_req cp;
+               conn->state = BT_CONNECT;
 
-                       bacpy(&cp.bdaddr, &ev->bdaddr);
-                       cp.pkt_type = cpu_to_le16(conn->pkt_type);
+               bacpy(&cp.bdaddr, &ev->bdaddr);
+               cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
-                       cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
-                       cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
-                       cp.max_latency    = cpu_to_le16(0xffff);
-                       cp.content_format = cpu_to_le16(hdev->voice_setting);
-                       cp.retrans_effort = 0xff;
+               cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
+               cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
+               cp.max_latency    = cpu_to_le16(0xffff);
+               cp.content_format = cpu_to_le16(hdev->voice_setting);
+               cp.retrans_effort = 0xff;
 
-                       hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
-                                    sizeof(cp), &cp);
-               } else {
-                       conn->state = BT_CONNECT2;
-                       hci_proto_connect_cfm(conn, 0);
-               }
+               hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
+                            &cp);
        } else {
-               /* Connection rejected */
-               struct hci_cp_reject_conn_req cp;
-
-               bacpy(&cp.bdaddr, &ev->bdaddr);
-               cp.reason = HCI_ERROR_REJ_BAD_ADDR;
-               hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
+               conn->state = BT_CONNECT2;
+               hci_proto_connect_cfm(conn, 0);
        }
 }
 
@@ -2158,7 +2265,8 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
                                reason, mgmt_connected);
 
-       if (conn->type == ACL_LINK && conn->flush_key)
+       if (conn->type == ACL_LINK &&
+           test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
                hci_remove_link_key(hdev, &conn->dst);
 
        params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
@@ -2170,7 +2278,9 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        /* Fall through */
 
                case HCI_AUTO_CONN_ALWAYS:
-                       hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
+                       list_del_init(&params->action);
+                       list_add(&params->action, &hdev->pend_le_conns);
+                       hci_update_background_scan(hdev);
                        break;
 
                default:
@@ -2218,7 +2328,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
                        BT_INFO("re-auth of legacy device is not possible.");
                } else {
-                       conn->link_mode |= HCI_LM_AUTH;
+                       set_bit(HCI_CONN_AUTH, &conn->flags);
                        conn->sec_level = conn->pending_sec_level;
                }
        } else {
@@ -2321,19 +2431,19 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (!ev->status) {
                if (ev->encrypt) {
                        /* Encryption implies authentication */
-                       conn->link_mode |= HCI_LM_AUTH;
-                       conn->link_mode |= HCI_LM_ENCRYPT;
+                       set_bit(HCI_CONN_AUTH, &conn->flags);
+                       set_bit(HCI_CONN_ENCRYPT, &conn->flags);
                        conn->sec_level = conn->pending_sec_level;
 
                        /* P-256 authentication key implies FIPS */
                        if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
-                               conn->link_mode |= HCI_LM_FIPS;
+                               set_bit(HCI_CONN_FIPS, &conn->flags);
 
                        if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
                            conn->type == LE_LINK)
                                set_bit(HCI_CONN_AES_CCM, &conn->flags);
                } else {
-                       conn->link_mode &= ~HCI_LM_ENCRYPT;
+                       clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
                        clear_bit(HCI_CONN_AES_CCM, &conn->flags);
                }
        }
@@ -2384,7 +2494,7 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (conn) {
                if (!ev->status)
-                       conn->link_mode |= HCI_LM_SECURE;
+                       set_bit(HCI_CONN_SECURE, &conn->flags);
 
                clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
 
@@ -2595,6 +2705,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_read_local_amp_info(hdev, skb);
                break;
 
+       case HCI_OP_READ_CLOCK:
+               hci_cc_read_clock(hdev, skb);
+               break;
+
        case HCI_OP_READ_LOCAL_AMP_ASSOC:
                hci_cc_read_local_amp_assoc(hdev, skb);
                break;
@@ -2709,7 +2823,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 
        if (opcode != HCI_OP_NOP)
-               del_timer(&hdev->cmd_timer);
+               cancel_delayed_work(&hdev->cmd_timer);
 
        hci_req_cmd_complete(hdev, opcode, status);
 
@@ -2800,7 +2914,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 
        if (opcode != HCI_OP_NOP)
-               del_timer(&hdev->cmd_timer);
+               cancel_delayed_work(&hdev->cmd_timer);
 
        if (ev->status ||
            (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
@@ -2826,9 +2940,9 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (conn) {
                if (!ev->status) {
                        if (ev->role)
-                               conn->link_mode &= ~HCI_LM_MASTER;
+                               clear_bit(HCI_CONN_MASTER, &conn->flags);
                        else
-                               conn->link_mode |= HCI_LM_MASTER;
+                               set_bit(HCI_CONN_MASTER, &conn->flags);
                }
 
                clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
@@ -3065,12 +3179,6 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
               &ev->bdaddr);
 
-       if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
-           key->type == HCI_LK_DEBUG_COMBINATION) {
-               BT_DBG("%s ignoring debug key", hdev->name);
-               goto not_found;
-       }
-
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (conn) {
                if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
@@ -3110,6 +3218,8 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_link_key_notify *ev = (void *) skb->data;
        struct hci_conn *conn;
+       struct link_key *key;
+       bool persistent;
        u8 pin_len = 0;
 
        BT_DBG("%s", hdev->name);
@@ -3128,10 +3238,33 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_conn_drop(conn);
        }
 
-       if (test_bit(HCI_MGMT, &hdev->dev_flags))
-               hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
-                                ev->key_type, pin_len);
+       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+               goto unlock;
+
+       key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
+                               ev->key_type, pin_len, &persistent);
+       if (!key)
+               goto unlock;
 
+       mgmt_new_link_key(hdev, key, persistent);
+
+       /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
+        * is set. If it's not set simply remove the key from the kernel
+        * list (we've still notified user space about it but with
+        * store_hint being 0).
+        */
+       if (key->type == HCI_LK_DEBUG_COMBINATION &&
+           !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
+               list_del(&key->list);
+               kfree(key);
+       } else if (conn) {
+               if (persistent)
+                       clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
+               else
+                       set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
+       }
+
+unlock:
        hci_dev_unlock(hdev);
 }
 
@@ -3197,7 +3330,6 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
 {
        struct inquiry_data data;
        int num_rsp = *((__u8 *) skb->data);
-       bool name_known, ssp;
 
        BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
 
@@ -3214,6 +3346,8 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                info = (void *) (skb->data + 1);
 
                for (; num_rsp; num_rsp--, info++) {
+                       u32 flags;
+
                        bacpy(&data.bdaddr, &info->bdaddr);
                        data.pscan_rep_mode     = info->pscan_rep_mode;
                        data.pscan_period_mode  = info->pscan_period_mode;
@@ -3223,16 +3357,18 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
 
-                       name_known = hci_inquiry_cache_update(hdev, &data,
-                                                             false, &ssp);
+                       flags = hci_inquiry_cache_update(hdev, &data, false);
+
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0, NULL, 0);
+                                         flags, NULL, 0, NULL, 0);
                }
        } else {
                struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
 
                for (; num_rsp; num_rsp--, info++) {
+                       u32 flags;
+
                        bacpy(&data.bdaddr, &info->bdaddr);
                        data.pscan_rep_mode     = info->pscan_rep_mode;
                        data.pscan_period_mode  = info->pscan_period_mode;
@@ -3241,11 +3377,12 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
                        data.clock_offset       = info->clock_offset;
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;
-                       name_known = hci_inquiry_cache_update(hdev, &data,
-                                                             false, &ssp);
+
+                       flags = hci_inquiry_cache_update(hdev, &data, false);
+
                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
-                                         !name_known, ssp, NULL, 0, NULL, 0);
+                                         flags, NULL, 0, NULL, 0);
                }
        }
 
@@ -3348,6 +3485,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
                hci_conn_add_sysfs(conn);
                break;
 
+       case 0x10:      /* Connection Accept Timeout */
        case 0x0d:      /* Connection Rejected due to Limited Resources */
        case 0x11:      /* Unsupported Feature or Parameter Value */
        case 0x1c:      /* SCO interval rejected */
@@ -3411,7 +3549,8 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
        hci_dev_lock(hdev);
 
        for (; num_rsp; num_rsp--, info++) {
-               bool name_known, ssp;
+               u32 flags;
+               bool name_known;
 
                bacpy(&data.bdaddr, &info->bdaddr);
                data.pscan_rep_mode     = info->pscan_rep_mode;
@@ -3429,12 +3568,13 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                else
                        name_known = true;
 
-               name_known = hci_inquiry_cache_update(hdev, &data, name_known,
-                                                     &ssp);
+               flags = hci_inquiry_cache_update(hdev, &data, name_known);
+
                eir_len = eir_get_length(info->data, sizeof(info->data));
+
                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
-                                 info->dev_class, info->rssi, !name_known,
-                                 ssp, info->data, eir_len, NULL, 0);
+                                 info->dev_class, info->rssi,
+                                 flags, info->data, eir_len, NULL, 0);
        }
 
        hci_dev_unlock(hdev);
@@ -3967,13 +4107,20 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+       struct hci_conn_params *params;
        struct hci_conn *conn;
        struct smp_irk *irk;
+       u8 addr_type;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
 
        hci_dev_lock(hdev);
 
+       /* All controllers implicitly stop advertising in the event of a
+        * connection, so ensure that the state bit is cleared.
+        */
+       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
+
        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (!conn) {
                conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3986,7 +4133,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                if (ev->role == LE_CONN_ROLE_MASTER) {
                        conn->out = true;
-                       conn->link_mode |= HCI_LM_MASTER;
+                       set_bit(HCI_CONN_MASTER, &conn->flags);
                }
 
                /* If we didn't have a hci_conn object previously
@@ -4025,6 +4172,14 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                conn->init_addr_type = ev->bdaddr_type;
                bacpy(&conn->init_addr, &ev->bdaddr);
+
+               /* For incoming connections, set the default minimum
+                * and maximum connection interval. They will be used
+                * to check if the parameters are in range and if not
+                * trigger the connection update procedure.
+                */
+               conn->le_conn_min_interval = hdev->le_conn_min_interval;
+               conn->le_conn_max_interval = hdev->le_conn_max_interval;
        }
 
        /* Lookup the identity address from the stored connection
@@ -4042,6 +4197,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                conn->dst_type = irk->addr_type;
        }
 
+       if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
+               addr_type = BDADDR_LE_PUBLIC;
+       else
+               addr_type = BDADDR_LE_RANDOM;
+
+       /* Drop the connection if the device is blocked */
+       if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
+               hci_conn_drop(conn);
+               goto unlock;
+       }
+
        if (ev->status) {
                hci_le_conn_failed(conn, ev->status);
                goto unlock;
@@ -4055,40 +4221,75 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        conn->handle = __le16_to_cpu(ev->handle);
        conn->state = BT_CONNECTED;
 
-       if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
-               set_bit(HCI_CONN_6LOWPAN, &conn->flags);
+       conn->le_conn_interval = le16_to_cpu(ev->interval);
+       conn->le_conn_latency = le16_to_cpu(ev->latency);
+       conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
 
        hci_conn_add_sysfs(conn);
 
        hci_proto_connect_cfm(conn, ev->status);
 
-       hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
+       params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+       if (params)
+               list_del_init(&params->action);
 
 unlock:
+       hci_update_background_scan(hdev);
+       hci_dev_unlock(hdev);
+}
+
+static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
+                                           struct sk_buff *skb)
+{
+       struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+
+       if (ev->status)
+               return;
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+       if (conn) {
+               conn->le_conn_interval = le16_to_cpu(ev->interval);
+               conn->le_conn_latency = le16_to_cpu(ev->latency);
+               conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
+       }
+
        hci_dev_unlock(hdev);
 }
 
 /* This function requires the caller holds hdev->lock */
 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
-                                 u8 addr_type)
+                                 u8 addr_type, u8 adv_type)
 {
        struct hci_conn *conn;
-       struct smp_irk *irk;
 
-       /* If this is a resolvable address, we should resolve it and then
-        * update address and address type variables.
-        */
-       irk = hci_get_irk(hdev, addr, addr_type);
-       if (irk) {
-               addr = &irk->bdaddr;
-               addr_type = irk->addr_type;
-       }
+       /* If the event is not connectable don't proceed further */
+       if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
+               return;
 
-       if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
+       /* Ignore if the device is blocked */
+       if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
                return;
 
+       /* If we're connectable, always connect any ADV_DIRECT_IND event */
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+           adv_type == LE_ADV_DIRECT_IND)
+               goto connect;
+
+       /* If we're not connectable, only connect devices that we have in
+        * our pend_le_conns list.
+        */
+       if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
+               return;
+
+connect:
+       /* Request connection in master = true role */
        conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
-                             HCI_AT_NO_BONDING);
+                             HCI_LE_AUTOCONN_TIMEOUT, true);
        if (!IS_ERR(conn))
                return;
 
@@ -4109,15 +4310,65 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                               u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
 {
        struct discovery_state *d = &hdev->discovery;
+       struct smp_irk *irk;
        bool match;
+       u32 flags;
+
+       /* Check if we need to convert to identity address */
+       irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
+       if (irk) {
+               bdaddr = &irk->bdaddr;
+               bdaddr_type = irk->addr_type;
+       }
+
+       /* Check if we have been requested to connect to this device */
+       check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
 
-       /* Passive scanning shouldn't trigger any device found events */
+       /* Passive scanning shouldn't trigger any device found events,
+        * except for devices marked as CONN_REPORT for which we do send
+        * device found events.
+        */
        if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
-               if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
-                       check_pending_le_conn(hdev, bdaddr, bdaddr_type);
+               struct hci_conn_params *param;
+
+               if (type == LE_ADV_DIRECT_IND)
+                       return;
+
+               param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+                                                 bdaddr, bdaddr_type);
+               if (!param)
+                       return;
+
+               if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
+                       flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
+               else
+                       flags = 0;
+               mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
+                                 rssi, flags, data, len, NULL, 0);
                return;
        }
 
+       /* When receiving non-connectable or scannable undirected
+        * advertising reports, this means that the remote device is
+        * not connectable and then clearly indicate this in the
+        * device found event.
+        *
+        * When receiving a scan response, then there is no way to
+        * know if the remote device is connectable or not. However
+        * since scan responses are merged with a previously seen
+        * advertising report, the flags field from that report
+        * will be used.
+        *
+        * In the really unlikely case that a controller gets confused
+        * and just sends a scan response event, then it is marked as
+        * not connectable as well.
+        */
+       if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
+           type == LE_ADV_SCAN_RSP)
+               flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
+       else
+               flags = 0;
+
        /* If there's nothing pending either store the data from this
         * event or send an immediate device found event if the data
         * should not be stored for later.
@@ -4128,12 +4379,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                 */
                if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
                        store_pending_adv_report(hdev, bdaddr, bdaddr_type,
-                                                rssi, data, len);
+                                                rssi, flags, data, len);
                        return;
                }
 
                mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
-                                 rssi, 0, 1, data, len, NULL, 0);
+                                 rssi, flags, data, len, NULL, 0);
                return;
        }
 
@@ -4150,7 +4401,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                if (!match)
                        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                                          d->last_adv_addr_type, NULL,
-                                         d->last_adv_rssi, 0, 1,
+                                         d->last_adv_rssi, d->last_adv_flags,
                                          d->last_adv_data,
                                          d->last_adv_data_len, NULL, 0);
 
@@ -4159,7 +4410,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                 */
                if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
                        store_pending_adv_report(hdev, bdaddr, bdaddr_type,
-                                                rssi, data, len);
+                                                rssi, flags, data, len);
                        return;
                }
 
@@ -4168,7 +4419,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                 */
                clear_pending_adv_report(hdev);
                mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
-                                 rssi, 0, 1, data, len, NULL, 0);
+                                 rssi, flags, data, len, NULL, 0);
                return;
        }
 
@@ -4177,8 +4428,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
         * sending a merged device found event.
         */
        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
-                         d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
-                         d->last_adv_data, d->last_adv_data_len);
+                         d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
+                         d->last_adv_data, d->last_adv_data_len, data, len);
        clear_pending_adv_report(hdev);
 }
 
@@ -4241,9 +4492,12 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
         * distribute the keys. Later, security can be re-established
         * using a distributed LTK.
         */
-       if (ltk->type == HCI_SMP_STK_SLAVE) {
+       if (ltk->type == SMP_STK) {
+               set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
                list_del(&ltk->list);
                kfree(ltk);
+       } else {
+               clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
        }
 
        hci_dev_unlock(hdev);
@@ -4256,6 +4510,76 @@ not_found:
        hci_dev_unlock(hdev);
 }
 
+static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
+                                     u8 reason)
+{
+       struct hci_cp_le_conn_param_req_neg_reply cp;
+
+       cp.handle = cpu_to_le16(handle);
+       cp.reason = reason;
+
+       hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
+                    &cp);
+}
+
+static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
+                                            struct sk_buff *skb)
+{
+       struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
+       struct hci_cp_le_conn_param_req_reply cp;
+       struct hci_conn *hcon;
+       u16 handle, min, max, latency, timeout;
+
+       handle = le16_to_cpu(ev->handle);
+       min = le16_to_cpu(ev->interval_min);
+       max = le16_to_cpu(ev->interval_max);
+       latency = le16_to_cpu(ev->latency);
+       timeout = le16_to_cpu(ev->timeout);
+
+       hcon = hci_conn_hash_lookup_handle(hdev, handle);
+       if (!hcon || hcon->state != BT_CONNECTED)
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_UNKNOWN_CONN_ID);
+
+       if (hci_check_conn_params(min, max, latency, timeout))
+               return send_conn_param_neg_reply(hdev, handle,
+                                                HCI_ERROR_INVALID_LL_PARAMS);
+
+       if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
+               struct hci_conn_params *params;
+               u8 store_hint;
+
+               hci_dev_lock(hdev);
+
+               params = hci_conn_params_lookup(hdev, &hcon->dst,
+                                               hcon->dst_type);
+               if (params) {
+                       params->conn_min_interval = min;
+                       params->conn_max_interval = max;
+                       params->conn_latency = latency;
+                       params->supervision_timeout = timeout;
+                       store_hint = 0x01;
+               } else{
+                       store_hint = 0x00;
+               }
+
+               hci_dev_unlock(hdev);
+
+               mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
+                                   store_hint, min, max, latency, timeout);
+       }
+
+       cp.handle = ev->handle;
+       cp.interval_min = ev->interval_min;
+       cp.interval_max = ev->interval_max;
+       cp.latency = ev->latency;
+       cp.timeout = ev->timeout;
+       cp.min_ce_len = 0;
+       cp.max_ce_len = 0;
+
+       hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
+}
+
 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -4267,6 +4591,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_le_conn_complete_evt(hdev, skb);
                break;
 
+       case HCI_EV_LE_CONN_UPDATE_COMPLETE:
+               hci_le_conn_update_complete_evt(hdev, skb);
+               break;
+
        case HCI_EV_LE_ADVERTISING_REPORT:
                hci_le_adv_report_evt(hdev, skb);
                break;
@@ -4275,6 +4603,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_le_ltk_request_evt(hdev, skb);
                break;
 
+       case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
+               hci_le_remote_conn_param_req_evt(hdev, skb);
+               break;
+
        default:
                break;
        }
index 80d25c150a653b57977c5b56c227908e20f4fa32..c64728d571ae7648711702552a77425437094fdd 100644 (file)
@@ -481,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
+       err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 
        hci_dev_unlock(hdev);
 
@@ -498,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
+       err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 
        hci_dev_unlock(hdev);
 
@@ -517,6 +517,9 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;
 
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               return -EOPNOTSUPP;
+
        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;
 
@@ -690,7 +693,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
 
                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
-                   test_bit(HCI_SETUP, &hdev->dev_flags)) {
+                   test_bit(HCI_SETUP, &hdev->dev_flags) ||
+                   test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
@@ -960,7 +964,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        goto drop;
                }
 
-               if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
+               if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
index 323f23cd2c37c4a9b95f3c195c48d170d9e73404..8680aae678ce012a0b62a206d14dc96936122a0e 100644 (file)
@@ -40,7 +40,6 @@
 #include "smp.h"
 #include "a2mp.h"
 #include "amp.h"
-#include "6lowpan.h"
 
 #define LE_FLOWCTL_MAX_CREDITS 65535
 
@@ -205,6 +204,7 @@ done:
        write_unlock(&chan_list_lock);
        return err;
 }
+EXPORT_SYMBOL_GPL(l2cap_add_psm);
 
 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
 {
@@ -437,6 +437,7 @@ struct l2cap_chan *l2cap_chan_create(void)
 
        return chan;
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_create);
 
 static void l2cap_chan_destroy(struct kref *kref)
 {
@@ -464,6 +465,7 @@ void l2cap_chan_put(struct l2cap_chan *c)
 
        kref_put(&c->kref, l2cap_chan_destroy);
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_put);
 
 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
 {
@@ -482,6 +484,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
 
        set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
 
 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
 {
@@ -614,6 +617,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 
        return;
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_del);
 
 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
 {
@@ -717,6 +721,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
                break;
        }
 }
+EXPORT_SYMBOL(l2cap_chan_close);
 
 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 {
@@ -1455,13 +1460,12 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 {
        struct hci_conn *hcon = conn->hcon;
+       struct hci_dev *hdev = hcon->hdev;
        struct l2cap_chan *chan, *pchan;
        u8 dst_type;
 
        BT_DBG("");
 
-       bt_6lowpan_add_conn(conn);
-
        /* Check if we have socket listening on cid */
        pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
                                          &hcon->src, &hcon->dst);
@@ -1475,9 +1479,28 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        dst_type = bdaddr_type(hcon, hcon->dst_type);
 
        /* If device is blocked, do not create a channel for it */
-       if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
+       if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
                return;
 
+       /* For LE slave connections, make sure the connection interval
+        * is in the range of the minimum and maximum interval that has
+        * been configured for this connection. If not, then trigger
+        * the connection update procedure.
+        */
+       if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
+           (hcon->le_conn_interval < hcon->le_conn_min_interval ||
+            hcon->le_conn_interval > hcon->le_conn_max_interval)) {
+               struct l2cap_conn_param_update_req req;
+
+               req.min = cpu_to_le16(hcon->le_conn_min_interval);
+               req.max = cpu_to_le16(hcon->le_conn_max_interval);
+               req.latency = cpu_to_le16(hcon->le_conn_latency);
+               req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
+
+               l2cap_send_cmd(conn, l2cap_get_ident(conn),
+                              L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
+       }
+
        l2cap_chan_lock(pchan);
 
        chan = pchan->ops->new_connection(pchan);
@@ -2118,7 +2141,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
        struct sk_buff **frag;
        int sent = 0;
 
-       if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
+       if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
+                                       msg->msg_iov, count))
                return -EFAULT;
 
        sent += count;
@@ -2131,18 +2155,17 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 
                count = min_t(unsigned int, conn->mtu, len);
 
-               tmp = chan->ops->alloc_skb(chan, count,
+               tmp = chan->ops->alloc_skb(chan, 0, count,
                                           msg->msg_flags & MSG_DONTWAIT);
                if (IS_ERR(tmp))
                        return PTR_ERR(tmp);
 
                *frag = tmp;
 
-               if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+               if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
+                                               msg->msg_iov, count))
                        return -EFAULT;
 
-               (*frag)->priority = skb->priority;
-
                sent += count;
                len  -= count;
 
@@ -2156,26 +2179,23 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 }
 
 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
-                                                struct msghdr *msg, size_t len,
-                                                u32 priority)
+                                                struct msghdr *msg, size_t len)
 {
        struct l2cap_conn *conn = chan->conn;
        struct sk_buff *skb;
        int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
        struct l2cap_hdr *lh;
 
-       BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
-              __le16_to_cpu(chan->psm), len, priority);
+       BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
+              __le16_to_cpu(chan->psm), len);
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
 
-       skb = chan->ops->alloc_skb(chan, count + hlen,
+       skb = chan->ops->alloc_skb(chan, hlen, count,
                                   msg->msg_flags & MSG_DONTWAIT);
        if (IS_ERR(skb))
                return skb;
 
-       skb->priority = priority;
-
        /* Create L2CAP header */
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
@@ -2191,8 +2211,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
 }
 
 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
-                                             struct msghdr *msg, size_t len,
-                                             u32 priority)
+                                             struct msghdr *msg, size_t len)
 {
        struct l2cap_conn *conn = chan->conn;
        struct sk_buff *skb;
@@ -2203,13 +2222,11 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
 
        count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
 
-       skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
+       skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
                                   msg->msg_flags & MSG_DONTWAIT);
        if (IS_ERR(skb))
                return skb;
 
-       skb->priority = priority;
-
        /* Create L2CAP header */
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
@@ -2247,7 +2264,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
 
-       skb = chan->ops->alloc_skb(chan, count + hlen,
+       skb = chan->ops->alloc_skb(chan, hlen, count,
                                   msg->msg_flags & MSG_DONTWAIT);
        if (IS_ERR(skb))
                return skb;
@@ -2368,7 +2385,7 @@ static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
 
-       skb = chan->ops->alloc_skb(chan, count + hlen,
+       skb = chan->ops->alloc_skb(chan, hlen, count,
                                   msg->msg_flags & MSG_DONTWAIT);
        if (IS_ERR(skb))
                return skb;
@@ -2430,8 +2447,7 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
        return 0;
 }
 
-int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
-                   u32 priority)
+int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
 {
        struct sk_buff *skb;
        int err;
@@ -2442,7 +2458,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
 
        /* Connectionless channel */
        if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
-               skb = l2cap_create_connless_pdu(chan, msg, len, priority);
+               skb = l2cap_create_connless_pdu(chan, msg, len);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
@@ -2499,7 +2515,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
                        return -EMSGSIZE;
 
                /* Create a basic PDU */
-               skb = l2cap_create_basic_pdu(chan, msg, len, priority);
+               skb = l2cap_create_basic_pdu(chan, msg, len);
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
@@ -2562,6 +2578,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
 
        return err;
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_send);
 
 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
 {
@@ -3217,6 +3234,9 @@ done:
 
        switch (chan->mode) {
        case L2CAP_MODE_BASIC:
+               if (disable_ertm)
+                       break;
+
                if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
                    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
                        break;
@@ -5197,27 +5217,6 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
        return 0;
 }
 
-static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
-                                        u16 to_multiplier)
-{
-       u16 max_latency;
-
-       if (min > max || min < 6 || max > 3200)
-               return -EINVAL;
-
-       if (to_multiplier < 10 || to_multiplier > 3200)
-               return -EINVAL;
-
-       if (max >= to_multiplier * 8)
-               return -EINVAL;
-
-       max_latency = (to_multiplier * 8 / max) - 1;
-       if (latency > 499 || latency > max_latency)
-               return -EINVAL;
-
-       return 0;
-}
-
 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
                                              struct l2cap_cmd_hdr *cmd,
                                              u16 cmd_len, u8 *data)
@@ -5228,7 +5227,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
        u16 min, max, latency, to_multiplier;
        int err;
 
-       if (!(hcon->link_mode & HCI_LM_MASTER))
+       if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
                return -EINVAL;
 
        if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
@@ -5245,7 +5244,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
        memset(&rsp, 0, sizeof(rsp));
 
-       err = l2cap_check_conn_param(min, max, latency, to_multiplier);
+       err = hci_check_conn_params(min, max, latency, to_multiplier);
        if (err)
                rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
        else
@@ -5254,8 +5253,16 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
                       sizeof(rsp), &rsp);
 
-       if (!err)
-               hci_le_conn_update(hcon, min, max, latency, to_multiplier);
+       if (!err) {
+               u8 store_hint;
+
+               store_hint = hci_le_conn_update(hcon, min, max, latency,
+                                               to_multiplier);
+               mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
+                                   store_hint, min, max, latency,
+                                   to_multiplier);
+
+       }
 
        return 0;
 }
@@ -6879,9 +6886,6 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
 
        BT_DBG("chan %p, len %d", chan, skb->len);
 
-       if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
-               goto drop;
-
        if (chan->imtu < skb->len)
                goto drop;
 
@@ -6914,6 +6918,16 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
                return;
        }
 
+       /* Since we can't actively block incoming LE connections we must
+        * at least ensure that we ignore incoming data from them.
+        */
+       if (hcon->type == LE_LINK &&
+           hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
+                                  bdaddr_type(hcon, hcon->dst_type))) {
+               kfree_skb(skb);
+               return;
+       }
+
        BT_DBG("len %d, cid 0x%4.4x", len, cid);
 
        switch (cid) {
@@ -6940,10 +6954,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
                        l2cap_conn_del(conn->hcon, EACCES);
                break;
 
-       case L2CAP_FC_6LOWPAN:
-               bt_6lowpan_recv(conn, skb);
-               break;
-
        default:
                l2cap_data_channel(conn, cid, skb);
                break;
@@ -7042,7 +7052,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        struct l2cap_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev *hdev;
-       __u8 auth_type;
        int err;
 
        BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
@@ -7118,9 +7127,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        chan->psm = psm;
        chan->dcid = cid;
 
-       auth_type = l2cap_get_auth_type(chan);
-
        if (bdaddr_type_is_le(dst_type)) {
+               bool master;
+
                /* Convert from L2CAP channel address type to HCI address type
                 */
                if (dst_type == BDADDR_LE_PUBLIC)
@@ -7128,9 +7137,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                else
                        dst_type = ADDR_LE_DEV_RANDOM;
 
+               master = !test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
                hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-                                     auth_type);
+                                     HCI_LE_CONN_TIMEOUT, master);
        } else {
+               u8 auth_type = l2cap_get_auth_type(chan);
                hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
        }
 
@@ -7190,6 +7202,7 @@ done:
        hci_dev_put(hdev);
        return err;
 }
+EXPORT_SYMBOL_GPL(l2cap_chan_connect);
 
 /* ---- L2CAP interface with lower layer (HCI) ---- */
 
@@ -7252,8 +7265,6 @@ void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 {
        BT_DBG("hcon %p reason %d", hcon, reason);
 
-       bt_6lowpan_del_conn(hcon->l2cap_data);
-
        l2cap_conn_del(hcon, bt_to_errno(reason));
 }
 
@@ -7536,14 +7547,11 @@ int __init l2cap_init(void)
        debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
                           &le_default_mps);
 
-       bt_6lowpan_init();
-
        return 0;
 }
 
 void l2cap_exit(void)
 {
-       bt_6lowpan_cleanup();
        debugfs_remove(l2cap_debugfs);
        l2cap_cleanup_sockets();
 }
index e1378693cc907086d5e90a4f8003c11cb774bda2..9bb4d1b3a48382301ba9af14c392b7754f4198e8 100644 (file)
@@ -361,7 +361,8 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
        BT_DBG("sock %p, sk %p", sock, sk);
 
        if (peer && sk->sk_state != BT_CONNECTED &&
-           sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
+           sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
+           sk->sk_state != BT_CONFIG)
                return -ENOTCONN;
 
        memset(la, 0, sizeof(struct sockaddr_l2));
@@ -964,7 +965,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                return err;
 
        l2cap_chan_lock(chan);
-       err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
+       err = l2cap_chan_send(chan, msg, len);
        l2cap_chan_unlock(chan);
 
        return err;
@@ -1292,6 +1293,7 @@ static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
 }
 
 static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+                                              unsigned long hdr_len,
                                               unsigned long len, int nb)
 {
        struct sock *sk = chan->data;
@@ -1299,17 +1301,26 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
        int err;
 
        l2cap_chan_unlock(chan);
-       skb = bt_skb_send_alloc(sk, len, nb, &err);
+       skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err);
        l2cap_chan_lock(chan);
 
        if (!skb)
                return ERR_PTR(err);
 
+       skb->priority = sk->sk_priority;
+
        bt_cb(skb)->chan = chan;
 
        return skb;
 }
 
+static int l2cap_sock_memcpy_fromiovec_cb(struct l2cap_chan *chan,
+                                         unsigned char *kdata,
+                                         struct iovec *iov, int len)
+{
+       return memcpy_fromiovec(kdata, iov, len);
+}
+
 static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
 {
        struct sock *sk = chan->data;
@@ -1375,20 +1386,21 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
        sk->sk_state_change(sk);
 }
 
-static struct l2cap_ops l2cap_chan_ops = {
-       .name           = "L2CAP Socket Interface",
-       .new_connection = l2cap_sock_new_connection_cb,
-       .recv           = l2cap_sock_recv_cb,
-       .close          = l2cap_sock_close_cb,
-       .teardown       = l2cap_sock_teardown_cb,
-       .state_change   = l2cap_sock_state_change_cb,
-       .ready          = l2cap_sock_ready_cb,
-       .defer          = l2cap_sock_defer_cb,
-       .resume         = l2cap_sock_resume_cb,
-       .suspend        = l2cap_sock_suspend_cb,
-       .set_shutdown   = l2cap_sock_set_shutdown_cb,
-       .get_sndtimeo   = l2cap_sock_get_sndtimeo_cb,
-       .alloc_skb      = l2cap_sock_alloc_skb_cb,
+static const struct l2cap_ops l2cap_chan_ops = {
+       .name                   = "L2CAP Socket Interface",
+       .new_connection         = l2cap_sock_new_connection_cb,
+       .recv                   = l2cap_sock_recv_cb,
+       .close                  = l2cap_sock_close_cb,
+       .teardown               = l2cap_sock_teardown_cb,
+       .state_change           = l2cap_sock_state_change_cb,
+       .ready                  = l2cap_sock_ready_cb,
+       .defer                  = l2cap_sock_defer_cb,
+       .resume                 = l2cap_sock_resume_cb,
+       .suspend                = l2cap_sock_suspend_cb,
+       .set_shutdown           = l2cap_sock_set_shutdown_cb,
+       .get_sndtimeo           = l2cap_sock_get_sndtimeo_cb,
+       .alloc_skb              = l2cap_sock_alloc_skb_cb,
+       .memcpy_fromiovec       = l2cap_sock_memcpy_fromiovec_cb,
 };
 
 static void l2cap_sock_destruct(struct sock *sk)
index af8e0a6243b7520617156f79d7d430ce12ef4be9..91b1f92c681e0372ab1276d99b7e89750b2e0076 100644 (file)
@@ -35,7 +35,7 @@
 #include "smp.h"
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  6
+#define MGMT_REVISION  7
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -85,6 +85,14 @@ static const u16 mgmt_commands[] = {
        MGMT_OP_SET_PRIVACY,
        MGMT_OP_LOAD_IRKS,
        MGMT_OP_GET_CONN_INFO,
+       MGMT_OP_GET_CLOCK_INFO,
+       MGMT_OP_ADD_DEVICE,
+       MGMT_OP_REMOVE_DEVICE,
+       MGMT_OP_LOAD_CONN_PARAM,
+       MGMT_OP_READ_UNCONF_INDEX_LIST,
+       MGMT_OP_READ_CONFIG_INFO,
+       MGMT_OP_SET_EXTERNAL_CONFIG,
+       MGMT_OP_SET_PUBLIC_ADDRESS,
 };
 
 static const u16 mgmt_events[] = {
@@ -111,6 +119,12 @@ static const u16 mgmt_events[] = {
        MGMT_EV_PASSKEY_NOTIFY,
        MGMT_EV_NEW_IRK,
        MGMT_EV_NEW_CSRK,
+       MGMT_EV_DEVICE_ADDED,
+       MGMT_EV_DEVICE_REMOVED,
+       MGMT_EV_NEW_CONN_PARAM,
+       MGMT_EV_UNCONF_INDEX_ADDED,
+       MGMT_EV_UNCONF_INDEX_REMOVED,
+       MGMT_EV_NEW_CONFIG_OPTIONS,
 };
 
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
@@ -200,6 +214,36 @@ static u8 mgmt_status(u8 hci_status)
        return MGMT_STATUS_FAILED;
 }
 
+static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
+                     struct sock *skip_sk)
+{
+       struct sk_buff *skb;
+       struct mgmt_hdr *hdr;
+
+       skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (void *) skb_put(skb, sizeof(*hdr));
+       hdr->opcode = cpu_to_le16(event);
+       if (hdev)
+               hdr->index = cpu_to_le16(hdev->id);
+       else
+               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+       hdr->len = cpu_to_le16(data_len);
+
+       if (data)
+               memcpy(skb_put(skb, data_len), data, data_len);
+
+       /* Time stamp */
+       __net_timestamp(skb);
+
+       hci_send_to_control(skb, skip_sk);
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 {
        struct sk_buff *skb;
@@ -327,7 +371,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
-               if (d->dev_type == HCI_BREDR)
+               if (d->dev_type == HCI_BREDR &&
+                   !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
                        count++;
        }
 
@@ -340,13 +385,19 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
-               if (test_bit(HCI_SETUP, &d->dev_flags))
+               if (test_bit(HCI_SETUP, &d->dev_flags) ||
+                   test_bit(HCI_CONFIG, &d->dev_flags) ||
+                   test_bit(HCI_USER_CHANNEL, &d->dev_flags))
                        continue;
 
-               if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+               /* Devices marked as raw-only are neither configured
+                * nor unconfigured controllers.
+                */
+               if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
                        continue;
 
-               if (d->dev_type == HCI_BREDR) {
+               if (d->dev_type == HCI_BREDR &&
+                   !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
                        rp->index[count++] = cpu_to_le16(d->id);
                        BT_DBG("Added hci%u", d->id);
                }
@@ -365,6 +416,138 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
        return err;
 }
 
+static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
+                                 void *data, u16 data_len)
+{
+       struct mgmt_rp_read_unconf_index_list *rp;
+       struct hci_dev *d;
+       size_t rp_len;
+       u16 count;
+       int err;
+
+       BT_DBG("sock %p", sk);
+
+       read_lock(&hci_dev_list_lock);
+
+       count = 0;
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (d->dev_type == HCI_BREDR &&
+                   test_bit(HCI_UNCONFIGURED, &d->dev_flags))
+                       count++;
+       }
+
+       rp_len = sizeof(*rp) + (2 * count);
+       rp = kmalloc(rp_len, GFP_ATOMIC);
+       if (!rp) {
+               read_unlock(&hci_dev_list_lock);
+               return -ENOMEM;
+       }
+
+       count = 0;
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (test_bit(HCI_SETUP, &d->dev_flags) ||
+                   test_bit(HCI_CONFIG, &d->dev_flags) ||
+                   test_bit(HCI_USER_CHANNEL, &d->dev_flags))
+                       continue;
+
+               /* Devices marked as raw-only are neither configured
+                * nor unconfigured controllers.
+                */
+               if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
+                       continue;
+
+               if (d->dev_type == HCI_BREDR &&
+                   test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
+                       rp->index[count++] = cpu_to_le16(d->id);
+                       BT_DBG("Added hci%u", d->id);
+               }
+       }
+
+       rp->num_controllers = cpu_to_le16(count);
+       rp_len = sizeof(*rp) + (2 * count);
+
+       read_unlock(&hci_dev_list_lock);
+
+       err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
+                          0, rp, rp_len);
+
+       kfree(rp);
+
+       return err;
+}
+
+static bool is_configured(struct hci_dev *hdev)
+{
+       if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+           !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+               return false;
+
+       if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+           !bacmp(&hdev->public_addr, BDADDR_ANY))
+               return false;
+
+       return true;
+}
+
+static __le32 get_missing_options(struct hci_dev *hdev)
+{
+       u32 options = 0;
+
+       if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
+           !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
+               options |= MGMT_OPTION_EXTERNAL_CONFIG;
+
+       if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
+           !bacmp(&hdev->public_addr, BDADDR_ANY))
+               options |= MGMT_OPTION_PUBLIC_ADDRESS;
+
+       return cpu_to_le32(options);
+}
+
+static int new_options(struct hci_dev *hdev, struct sock *skip)
+{
+       __le32 options = get_missing_options(hdev);
+
+       return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+                         sizeof(options), skip);
+}
+
+static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
+{
+       __le32 options = get_missing_options(hdev);
+
+       return cmd_complete(sk, hdev->id, opcode, 0, &options,
+                           sizeof(options));
+}
+
+static int read_config_info(struct sock *sk, struct hci_dev *hdev,
+                           void *data, u16 data_len)
+{
+       struct mgmt_rp_read_config_info rp;
+       u32 options = 0;
+
+       BT_DBG("sock %p %s", sk, hdev->name);
+
+       hci_dev_lock(hdev);
+
+       memset(&rp, 0, sizeof(rp));
+       rp.manufacturer = cpu_to_le16(hdev->manufacturer);
+
+       if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+               options |= MGMT_OPTION_EXTERNAL_CONFIG;
+
+       if (hdev->set_bdaddr)
+               options |= MGMT_OPTION_PUBLIC_ADDRESS;
+
+       rp.supported_options = cpu_to_le32(options);
+       rp.missing_options = get_missing_options(hdev);
+
+       hci_dev_unlock(hdev);
+
+       return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
+                           sizeof(rp));
+}
+
 static u32 get_supported_settings(struct hci_dev *hdev)
 {
        u32 settings = 0;
@@ -372,12 +555,12 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        settings |= MGMT_SETTING_POWERED;
        settings |= MGMT_SETTING_PAIRABLE;
        settings |= MGMT_SETTING_DEBUG_KEYS;
+       settings |= MGMT_SETTING_CONNECTABLE;
+       settings |= MGMT_SETTING_DISCOVERABLE;
 
        if (lmp_bredr_capable(hdev)) {
-               settings |= MGMT_SETTING_CONNECTABLE;
                if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
                        settings |= MGMT_SETTING_FAST_CONNECTABLE;
-               settings |= MGMT_SETTING_DISCOVERABLE;
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;
 
@@ -387,7 +570,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                }
 
                if (lmp_sc_capable(hdev) ||
-                   test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+                   test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                        settings |= MGMT_SETTING_SECURE_CONN;
        }
 
@@ -397,6 +580,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                settings |= MGMT_SETTING_PRIVACY;
        }
 
+       if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+           hdev->set_bdaddr)
+               settings |= MGMT_SETTING_CONFIGURATION;
+
        return settings;
 }
 
@@ -440,7 +627,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
                settings |= MGMT_SETTING_SECURE_CONN;
 
-       if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
+       if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
                settings |= MGMT_SETTING_DEBUG_KEYS;
 
        if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
@@ -571,6 +758,22 @@ static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
        return NULL;
 }
 
+static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
+                                                 struct hci_dev *hdev,
+                                                 const void *data)
+{
+       struct pending_cmd *cmd;
+
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+               if (cmd->user_data != data)
+                       continue;
+               if (cmd->opcode == opcode)
+                       return cmd;
+       }
+
+       return NULL;
+}
+
 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
 {
        u8 ad_len = 0;
@@ -836,6 +1039,13 @@ static bool get_connectable(struct hci_dev *hdev)
        return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
 }
 
+static void disable_advertising(struct hci_request *req)
+{
+       u8 enable = 0x00;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
 static void enable_advertising(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
@@ -843,12 +1053,18 @@ static void enable_advertising(struct hci_request *req)
        u8 own_addr_type, enable = 0x01;
        bool connectable;
 
-       /* Clear the HCI_ADVERTISING bit temporarily so that the
+       if (hci_conn_num(hdev, LE_LINK) > 0)
+               return;
+
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+               disable_advertising(req);
+
+       /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
-       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+       clear_bit(HCI_LE_ADV, &hdev->dev_flags);
 
        connectable = get_connectable(hdev);
 
@@ -871,13 +1087,6 @@ static void enable_advertising(struct hci_request *req)
        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
 }
 
-static void disable_advertising(struct hci_request *req)
-{
-       u8 enable = 0x00;
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static void service_cache_off(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -909,19 +1118,14 @@ static void rpa_expired(struct work_struct *work)
 
        set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
 
-       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
-           hci_conn_num(hdev, LE_LINK) > 0)
+       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                return;
 
        /* The generation of a new RPA and programming it into the
         * controller happens in the enable_advertising() function.
         */
-
        hci_req_init(&req, hdev);
-
-       disable_advertising(&req);
        enable_advertising(&req);
-
        hci_req_run(&req, NULL);
 }
 
@@ -984,7 +1188,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 {
        struct pending_cmd *cmd;
 
-       cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return NULL;
 
@@ -1047,7 +1251,7 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
        }
 }
 
-static void hci_stop_discovery(struct hci_request *req)
+static bool hci_stop_discovery(struct hci_request *req)
 {
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_remote_name_req_cancel cp;
@@ -1062,32 +1266,39 @@ static void hci_stop_discovery(struct hci_request *req)
                        hci_req_add_le_scan_disable(req);
                }
 
-               break;
+               return true;
 
        case DISCOVERY_RESOLVING:
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
-                       return;
+                       break;
 
                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
 
-               break;
+               return true;
 
        default:
                /* Passive scanning */
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
                        hci_req_add_le_scan_disable(req);
+                       return true;
+               }
+
                break;
        }
+
+       return false;
 }
 
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
        struct hci_request req;
        struct hci_conn *conn;
+       bool discov_stopped;
+       int err;
 
        hci_req_init(&req, hdev);
 
@@ -1097,10 +1308,10 @@ static int clean_up_hci_state(struct hci_dev *hdev)
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
 
-       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
                disable_advertising(&req);
 
-       hci_stop_discovery(&req);
+       discov_stopped = hci_stop_discovery(&req);
 
        list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                struct hci_cp_disconnect dc;
@@ -1134,7 +1345,11 @@ static int clean_up_hci_state(struct hci_dev *hdev)
                }
        }
 
-       return hci_req_run(&req, clean_up_hci_complete);
+       err = hci_req_run(&req, clean_up_hci_complete);
+       if (!err && discov_stopped)
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+
+       return err;
 }
 
 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1203,36 +1418,6 @@ failed:
        return err;
 }
 
-static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
-                     struct sock *skip_sk)
-{
-       struct sk_buff *skb;
-       struct mgmt_hdr *hdr;
-
-       skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
-
-       hdr = (void *) skb_put(skb, sizeof(*hdr));
-       hdr->opcode = cpu_to_le16(event);
-       if (hdev)
-               hdr->index = cpu_to_le16(hdev->id);
-       else
-               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
-       hdr->len = cpu_to_le16(data_len);
-
-       if (data)
-               memcpy(skb_put(skb, data_len), data, data_len);
-
-       /* Time stamp */
-       __net_timestamp(skb);
-
-       hci_send_to_control(skb, skip_sk);
-       kfree_skb(skb);
-
-       return 0;
-}
-
 static int new_settings(struct hci_dev *hdev, struct sock *skip)
 {
        __le32 ev;
@@ -1242,6 +1427,11 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
        return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
 }
 
+int mgmt_new_settings(struct hci_dev *hdev)
+{
+       return new_settings(hdev, NULL);
+}
+
 struct cmd_lookup {
        struct sock *sk;
        struct hci_dev *hdev;
@@ -1577,8 +1767,10 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 
        send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
 
-       if (changed)
+       if (changed) {
                new_settings(hdev, cmd->sk);
+               hci_update_background_scan(hdev);
+       }
 
 remove_cmd:
        mgmt_pending_remove(cmd);
@@ -1607,8 +1799,10 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
        if (err < 0)
                return err;
 
-       if (changed)
+       if (changed) {
+               hci_update_background_scan(hdev);
                return new_settings(hdev, sk);
+       }
 
        return 0;
 }
@@ -1689,10 +1883,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                write_fast_connectable(&req, false);
 
        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
-           hci_conn_num(hdev, LE_LINK) == 0) {
-               disable_advertising(&req);
+           !test_bit(HCI_LE_ADV, &hdev->dev_flags))
                enable_advertising(&req);
-       }
 
        err = hci_req_run(&req, set_connectable_complete);
        if (err < 0) {
@@ -1877,6 +2069,10 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
+       if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+               hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+                            sizeof(cp->val), &cp->val);
+
        err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
        if (err < 0) {
                mgmt_pending_remove(cmd);
@@ -1973,6 +2169,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status)
                update_scan_rsp_data(&req);
                hci_req_run(&req, NULL);
 
+               hci_update_background_scan(hdev);
+
                hci_dev_unlock(hdev);
        }
 }
@@ -2050,7 +2248,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                hci_cp.le = val;
                hci_cp.simul = lmp_le_br_capable(hdev);
        } else {
-               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
                        disable_advertising(&req);
        }
 
@@ -2373,6 +2571,8 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
 {
        struct mgmt_cp_load_link_keys *cp = data;
+       const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
+                                  sizeof(struct mgmt_link_key_info));
        u16 key_count, expected_len;
        bool changed;
        int i;
@@ -2384,6 +2584,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
        key_count = __le16_to_cpu(cp->key_count);
+       if (key_count > max_key_count) {
+               BT_ERR("load_link_keys: too big key_count value %u",
+                      key_count);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
 
        expected_len = sizeof(*cp) + key_count *
                                        sizeof(struct mgmt_link_key_info);
@@ -2414,9 +2620,11 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_link_keys_clear(hdev);
 
        if (cp->debug_keys)
-               changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
+                                           &hdev->dev_flags);
        else
-               changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
+                                            &hdev->dev_flags);
 
        if (changed)
                new_settings(hdev, NULL);
@@ -2424,8 +2632,14 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        for (i = 0; i < key_count; i++) {
                struct mgmt_link_key_info *key = &cp->keys[i];
 
-               hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
-                                key->type, key->pin_len);
+               /* Always ignore debug keys and require a new pairing if
+                * the user wants to use them.
+                */
+               if (key->type == HCI_LK_DEBUG_COMBINATION)
+                       continue;
+
+               hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
+                                key->type, key->pin_len, NULL);
        }
 
        cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
@@ -2766,6 +2980,10 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("");
 
+       if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
+               return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+                                   MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+
        hci_dev_lock(hdev);
 
        hdev->io_capability = cp->io_capability;
@@ -2878,6 +3096,11 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));
 
+       if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
+               return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
@@ -2902,8 +3125,20 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                else
                        addr_type = ADDR_LE_DEV_RANDOM;
 
+               /* When pairing a new device, it is expected to remember
+                * this device for future connections. Adding the connection
+                * parameter information ahead of time allows tracking
+                * of the slave preferred values and will speed up any
+                * further connection establishment.
+                *
+                * If connection parameters already exist, then they
+                * will be kept and this function does nothing.
+                */
+               hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
+               /* Request a connection with master = true role */
                conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-                                     sec_level, auth_type);
+                                     sec_level, HCI_LE_CONN_TIMEOUT, true);
        }
 
        if (IS_ERR(conn)) {
@@ -3031,14 +3266,7 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
        }
 
        if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-               /* Continue with pairing via SMP. The hdev lock must be
-                * released as SMP may try to recquire it for crypto
-                * purposes.
-                */
-               hci_dev_unlock(hdev);
                err = smp_user_confirm_reply(conn, mgmt_op, passkey);
-               hci_dev_lock(hdev);
-
                if (!err)
                        err = cmd_complete(sk, hdev->id, mgmt_op,
                                           MGMT_STATUS_SUCCESS, addr,
@@ -3516,11 +3744,21 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                        goto failed;
                }
 
-               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
-                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                        MGMT_STATUS_REJECTED);
-                       mgmt_pending_remove(cmd);
-                       goto failed;
+               if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
+                       /* Don't let discovery abort an outgoing
+                        * connection attempt that's using directed
+                        * advertising.
+                        */
+                       if (hci_conn_hash_lookup_state(hdev, LE_LINK,
+                                                      BT_CONNECT)) {
+                               err = cmd_status(sk, hdev->id,
+                                                MGMT_OP_START_DISCOVERY,
+                                                MGMT_STATUS_REJECTED);
+                               mgmt_pending_remove(cmd);
+                               goto failed;
+                       }
+
+                       disable_advertising(&req);
                }
 
                /* If controller is scanning, it means the background scanning
@@ -3723,12 +3961,18 @@ static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
-       if (err < 0)
+       err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
+                                 cp->addr.type);
+       if (err < 0) {
                status = MGMT_STATUS_FAILED;
-       else
-               status = MGMT_STATUS_SUCCESS;
+               goto done;
+       }
 
+       mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
+                  sk);
+       status = MGMT_STATUS_SUCCESS;
+
+done:
        err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
                           &cp->addr, sizeof(cp->addr));
 
@@ -3753,12 +3997,18 @@ static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
-       if (err < 0)
+       err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
+                                 cp->addr.type);
+       if (err < 0) {
                status = MGMT_STATUS_INVALID_PARAMS;
-       else
-               status = MGMT_STATUS_SUCCESS;
+               goto done;
+       }
 
+       mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
+                  sk);
+       status = MGMT_STATUS_SUCCESS;
+
+done:
        err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
                           &cp->addr, sizeof(cp->addr));
 
@@ -3813,8 +4063,13 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status)
                return;
        }
 
-       mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
-                            &match);
+       if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
+               set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+       else
+               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+       mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
+                            &match);
 
        new_settings(hdev, match.sk);
 
@@ -3853,7 +4108,9 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
         * necessary).
         */
        if (!hdev_is_powered(hdev) || val == enabled ||
-           hci_conn_num(hdev, LE_LINK) > 0) {
+           hci_conn_num(hdev, LE_LINK) > 0 ||
+           (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+            hdev->le_scan_type == LE_SCAN_ACTIVE)) {
                bool changed = false;
 
                if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
@@ -4105,7 +4362,8 @@ static void set_bredr_scan(struct hci_request *req)
         */
        write_fast_connectable(req, false);
 
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+           !list_empty(&hdev->whitelist))
                scan |= SCAN_PAGE;
        if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
                scan |= SCAN_INQUIRY;
@@ -4219,7 +4477,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_req_init(&req, hdev);
 
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+           !list_empty(&hdev->whitelist))
                set_bredr_scan(&req);
 
        /* Since only the advertising data flags will change, there
@@ -4252,7 +4511,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                                  status);
 
        if (!lmp_sc_capable(hdev) &&
-           !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+           !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
@@ -4328,21 +4587,37 @@ static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
                          void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
-       bool changed;
+       bool changed, use_changed;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (cp->val != 0x00 && cp->val != 0x01)
+       if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
                                  MGMT_STATUS_INVALID_PARAMS);
 
        hci_dev_lock(hdev);
 
        if (cp->val)
-               changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
+                                           &hdev->dev_flags);
        else
-               changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
+                                            &hdev->dev_flags);
+
+       if (cp->val == 0x02)
+               use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
+                                               &hdev->dev_flags);
+       else
+               use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
+                                                &hdev->dev_flags);
+
+       if (hdev_is_powered(hdev) && use_changed &&
+           test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+               u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
+               hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+                            sizeof(mode), &mode);
+       }
 
        err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
        if (err < 0)
@@ -4426,6 +4701,8 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                     u16 len)
 {
        struct mgmt_cp_load_irks *cp = cp_data;
+       const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
+                                  sizeof(struct mgmt_irk_info));
        u16 irk_count, expected_len;
        int i, err;
 
@@ -4436,6 +4713,11 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
        irk_count = __le16_to_cpu(cp->irk_count);
+       if (irk_count > max_irk_count) {
+               BT_ERR("load_irks: too big irk_count value %u", irk_count);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
 
        expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
        if (expected_len != len) {
@@ -4505,6 +4787,8 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                               void *cp_data, u16 len)
 {
        struct mgmt_cp_load_long_term_keys *cp = cp_data;
+       const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
+                                  sizeof(struct mgmt_ltk_info));
        u16 key_count, expected_len;
        int i, err;
 
@@ -4515,6 +4799,11 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
        key_count = __le16_to_cpu(cp->key_count);
+       if (key_count > max_key_count) {
+               BT_ERR("load_ltks: too big key_count value %u", key_count);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
 
        expected_len = sizeof(*cp) + key_count *
                                        sizeof(struct mgmt_ltk_info);
@@ -4550,9 +4839,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                        addr_type = ADDR_LE_DEV_RANDOM;
 
                if (key->master)
-                       type = HCI_SMP_LTK;
+                       type = SMP_LTK;
                else
-                       type = HCI_SMP_LTK_SLAVE;
+                       type = SMP_LTK_SLAVE;
 
                switch (key->type) {
                case MGMT_LTK_UNAUTHENTICATED:
@@ -4790,6 +5079,559 @@ unlock:
        return err;
 }
 
+static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
+{
+       struct mgmt_cp_get_clock_info *cp;
+       struct mgmt_rp_get_clock_info rp;
+       struct hci_cp_read_clock *hci_cp;
+       struct pending_cmd *cmd;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status %u", hdev->name, status);
+
+       hci_dev_lock(hdev);
+
+       hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
+       if (!hci_cp)
+               goto unlock;
+
+       if (hci_cp->which) {
+               u16 handle = __le16_to_cpu(hci_cp->handle);
+               conn = hci_conn_hash_lookup_handle(hdev, handle);
+       } else {
+               conn = NULL;
+       }
+
+       cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
+       if (!cmd)
+               goto unlock;
+
+       cp = cmd->param;
+
+       memset(&rp, 0, sizeof(rp));
+       memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
+
+       if (status)
+               goto send_rsp;
+
+       rp.local_clock = cpu_to_le32(hdev->clock);
+
+       if (conn) {
+               rp.piconet_clock = cpu_to_le32(conn->clock);
+               rp.accuracy = cpu_to_le16(conn->clock_accuracy);
+       }
+
+send_rsp:
+       cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
+                    &rp, sizeof(rp));
+       mgmt_pending_remove(cmd);
+       if (conn)
+               hci_conn_drop(conn);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
+                        u16 len)
+{
+       struct mgmt_cp_get_clock_info *cp = data;
+       struct mgmt_rp_get_clock_info rp;
+       struct hci_cp_read_clock hci_cp;
+       struct pending_cmd *cmd;
+       struct hci_request req;
+       struct hci_conn *conn;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       memset(&rp, 0, sizeof(rp));
+       bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+       rp.addr.type = cp->addr.type;
+
+       if (cp->addr.type != BDADDR_BREDR)
+               return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &rp, sizeof(rp));
+
+       hci_dev_lock(hdev);
+
+       if (!hdev_is_powered(hdev)) {
+               err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+                                  MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
+               goto unlock;
+       }
+
+       if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
+               conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+                                              &cp->addr.bdaddr);
+               if (!conn || conn->state != BT_CONNECTED) {
+                       err = cmd_complete(sk, hdev->id,
+                                          MGMT_OP_GET_CLOCK_INFO,
+                                          MGMT_STATUS_NOT_CONNECTED,
+                                          &rp, sizeof(rp));
+                       goto unlock;
+               }
+       } else {
+               conn = NULL;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       hci_req_init(&req, hdev);
+
+       memset(&hci_cp, 0, sizeof(hci_cp));
+       hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
+
+       if (conn) {
+               hci_conn_hold(conn);
+               cmd->user_data = conn;
+
+               hci_cp.handle = cpu_to_le16(conn->handle);
+               hci_cp.which = 0x01; /* Piconet clock */
+               hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
+       }
+
+       err = hci_req_run(&req, get_clock_info_complete);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+/* Helper for Add/Remove Device commands */
+static void update_page_scan(struct hci_dev *hdev, u8 scan)
+{
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return;
+
+       if (!hdev_is_powered(hdev))
+               return;
+
+       /* If HCI_CONNECTABLE is set then Add/Remove Device should not
+        * make any changes to page scanning.
+        */
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+               return;
+
+       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+               scan |= SCAN_INQUIRY;
+
+       hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void device_added(struct sock *sk, struct hci_dev *hdev,
+                        bdaddr_t *bdaddr, u8 type, u8 action)
+{
+       struct mgmt_ev_device_added ev;
+
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = type;
+       ev.action = action;
+
+       mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
+}
+
+static int add_device(struct sock *sk, struct hci_dev *hdev,
+                     void *data, u16 len)
+{
+       struct mgmt_cp_add_device *cp = data;
+       u8 auto_conn, addr_type;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!bdaddr_type_is_valid(cp->addr.type) ||
+           !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
+               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &cp->addr, sizeof(cp->addr));
+
+       if (cp->action != 0x00 && cp->action != 0x01)
+               return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                   MGMT_STATUS_INVALID_PARAMS,
+                                   &cp->addr, sizeof(cp->addr));
+
+       hci_dev_lock(hdev);
+
+       if (cp->addr.type == BDADDR_BREDR) {
+               bool update_scan;
+
+               /* Only "connect" action supported for now */
+               if (cp->action != 0x01) {
+                       err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                          MGMT_STATUS_INVALID_PARAMS,
+                                          &cp->addr, sizeof(cp->addr));
+                       goto unlock;
+               }
+
+               update_scan = list_empty(&hdev->whitelist);
+
+               err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
+                                         cp->addr.type);
+               if (err)
+                       goto unlock;
+
+               if (update_scan)
+                       update_page_scan(hdev, SCAN_PAGE);
+
+               goto added;
+       }
+
+       if (cp->addr.type == BDADDR_LE_PUBLIC)
+               addr_type = ADDR_LE_DEV_PUBLIC;
+       else
+               addr_type = ADDR_LE_DEV_RANDOM;
+
+       if (cp->action)
+               auto_conn = HCI_AUTO_CONN_ALWAYS;
+       else
+               auto_conn = HCI_AUTO_CONN_REPORT;
+
+       /* If the connection parameters don't exist for this device,
+        * they will be created and configured with defaults.
+        */
+       if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
+                               auto_conn) < 0) {
+               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                                  MGMT_STATUS_FAILED,
+                                  &cp->addr, sizeof(cp->addr));
+               goto unlock;
+       }
+
+added:
+       device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
+
+       err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
+                          MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static void device_removed(struct sock *sk, struct hci_dev *hdev,
+                          bdaddr_t *bdaddr, u8 type)
+{
+       struct mgmt_ev_device_removed ev;
+
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = type;
+
+       mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
+}
+
+static int remove_device(struct sock *sk, struct hci_dev *hdev,
+                        void *data, u16 len)
+{
+       struct mgmt_cp_remove_device *cp = data;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       hci_dev_lock(hdev);
+
+       if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
+               struct hci_conn_params *params;
+               u8 addr_type;
+
+               if (!bdaddr_type_is_valid(cp->addr.type)) {
+                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+                                          MGMT_STATUS_INVALID_PARAMS,
+                                          &cp->addr, sizeof(cp->addr));
+                       goto unlock;
+               }
+
+               if (cp->addr.type == BDADDR_BREDR) {
+                       err = hci_bdaddr_list_del(&hdev->whitelist,
+                                                 &cp->addr.bdaddr,
+                                                 cp->addr.type);
+                       if (err) {
+                               err = cmd_complete(sk, hdev->id,
+                                                  MGMT_OP_REMOVE_DEVICE,
+                                                  MGMT_STATUS_INVALID_PARAMS,
+                                                  &cp->addr, sizeof(cp->addr));
+                               goto unlock;
+                       }
+
+                       if (list_empty(&hdev->whitelist))
+                               update_page_scan(hdev, SCAN_DISABLED);
+
+                       device_removed(sk, hdev, &cp->addr.bdaddr,
+                                      cp->addr.type);
+                       goto complete;
+               }
+
+               if (cp->addr.type == BDADDR_LE_PUBLIC)
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       addr_type = ADDR_LE_DEV_RANDOM;
+
+               params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+                                               addr_type);
+               if (!params) {
+                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+                                          MGMT_STATUS_INVALID_PARAMS,
+                                          &cp->addr, sizeof(cp->addr));
+                       goto unlock;
+               }
+
+               if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+                                          MGMT_STATUS_INVALID_PARAMS,
+                                          &cp->addr, sizeof(cp->addr));
+                       goto unlock;
+               }
+
+               list_del(&params->action);
+               list_del(&params->list);
+               kfree(params);
+               hci_update_background_scan(hdev);
+
+               device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
+       } else {
+               struct hci_conn_params *p, *tmp;
+               struct bdaddr_list *b, *btmp;
+
+               if (cp->addr.type) {
+                       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+                                          MGMT_STATUS_INVALID_PARAMS,
+                                          &cp->addr, sizeof(cp->addr));
+                       goto unlock;
+               }
+
+               list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
+                       device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
+                       list_del(&b->list);
+                       kfree(b);
+               }
+
+               update_page_scan(hdev, SCAN_DISABLED);
+
+               list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
+                       if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
+                               continue;
+                       device_removed(sk, hdev, &p->addr, p->addr_type);
+                       list_del(&p->action);
+                       list_del(&p->list);
+                       kfree(p);
+               }
+
+               BT_DBG("All LE connection parameters were removed");
+
+               hci_update_background_scan(hdev);
+       }
+
+complete:
+       err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+                          MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
+                          u16 len)
+{
+       struct mgmt_cp_load_conn_param *cp = data;
+       const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
+                                    sizeof(struct mgmt_conn_param));
+       u16 param_count, expected_len;
+       int i;
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       param_count = __le16_to_cpu(cp->param_count);
+       if (param_count > max_param_count) {
+               BT_ERR("load_conn_param: too big param_count value %u",
+                      param_count);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
+
+       expected_len = sizeof(*cp) + param_count *
+                                       sizeof(struct mgmt_conn_param);
+       if (expected_len != len) {
+               BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
+                      expected_len, len);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
+
+       BT_DBG("%s param_count %u", hdev->name, param_count);
+
+       hci_dev_lock(hdev);
+
+       hci_conn_params_clear_disabled(hdev);
+
+       for (i = 0; i < param_count; i++) {
+               struct mgmt_conn_param *param = &cp->params[i];
+               struct hci_conn_params *hci_param;
+               u16 min, max, latency, timeout;
+               u8 addr_type;
+
+               BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
+                      param->addr.type);
+
+               if (param->addr.type == BDADDR_LE_PUBLIC) {
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               } else if (param->addr.type == BDADDR_LE_RANDOM) {
+                       addr_type = ADDR_LE_DEV_RANDOM;
+               } else {
+                       BT_ERR("Ignoring invalid connection parameters");
+                       continue;
+               }
+
+               min = le16_to_cpu(param->min_interval);
+               max = le16_to_cpu(param->max_interval);
+               latency = le16_to_cpu(param->latency);
+               timeout = le16_to_cpu(param->timeout);
+
+               BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
+                      min, max, latency, timeout);
+
+               if (hci_check_conn_params(min, max, latency, timeout) < 0) {
+                       BT_ERR("Ignoring invalid connection parameters");
+                       continue;
+               }
+
+               hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
+                                               addr_type);
+               if (!hci_param) {
+                       BT_ERR("Failed to add connection parameters");
+                       continue;
+               }
+
+               hci_param->conn_min_interval = min;
+               hci_param->conn_max_interval = max;
+               hci_param->conn_latency = latency;
+               hci_param->supervision_timeout = timeout;
+       }
+
+       hci_dev_unlock(hdev);
+
+       return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
+}
+
+static int set_external_config(struct sock *sk, struct hci_dev *hdev,
+                              void *data, u16 len)
+{
+       struct mgmt_cp_set_external_config *cp = data;
+       bool changed;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev_is_powered(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                 MGMT_STATUS_REJECTED);
+
+       if (cp->config != 0x00 && cp->config != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                   MGMT_STATUS_INVALID_PARAMS);
+
+       if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       hci_dev_lock(hdev);
+
+       if (cp->config)
+               changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
+                                           &hdev->dev_flags);
+       else
+               changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
+                                            &hdev->dev_flags);
+
+       err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (!changed)
+               goto unlock;
+
+       err = new_options(hdev, sk);
+
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
+               mgmt_index_removed(hdev);
+
+               if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
+                       set_bit(HCI_CONFIG, &hdev->dev_flags);
+                       set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
+                       queue_work(hdev->req_workqueue, &hdev->power_on);
+               } else {
+                       set_bit(HCI_RAW, &hdev->flags);
+                       mgmt_index_added(hdev);
+               }
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int set_public_address(struct sock *sk, struct hci_dev *hdev,
+                             void *data, u16 len)
+{
+       struct mgmt_cp_set_public_address *cp = data;
+       bool changed;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (hdev_is_powered(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                 MGMT_STATUS_REJECTED);
+
+       if (!bacmp(&cp->bdaddr, BDADDR_ANY))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       if (!hdev->set_bdaddr)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       hci_dev_lock(hdev);
+
+       changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
+       bacpy(&hdev->public_addr, &cp->bdaddr);
+
+       err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (!changed)
+               goto unlock;
+
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               err = new_options(hdev, sk);
+
+       if (is_configured(hdev)) {
+               mgmt_index_removed(hdev);
+
+               clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
+
+               set_bit(HCI_CONFIG, &hdev->dev_flags);
+               set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
+               queue_work(hdev->req_workqueue, &hdev->power_on);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
 static const struct mgmt_handler {
        int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
                     u16 data_len);
@@ -4846,9 +5688,16 @@ static const struct mgmt_handler {
        { set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
        { load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
        { get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
+       { get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
+       { add_device,             false, MGMT_ADD_DEVICE_SIZE },
+       { remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
+       { load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
+       { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
+       { read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
+       { set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
+       { set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
 };
 
-
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 {
        void *buf;
@@ -4892,11 +5741,21 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                }
 
                if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+                   test_bit(HCI_CONFIG, &hdev->dev_flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = cmd_status(sk, index, opcode,
                                         MGMT_STATUS_INVALID_INDEX);
                        goto done;
                }
+
+               if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
+                   opcode != MGMT_OP_READ_CONFIG_INFO &&
+                   opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
+                   opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
+                       err = cmd_status(sk, index, opcode,
+                                        MGMT_STATUS_INVALID_INDEX);
+                       goto done;
+               }
        }
 
        if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -4907,8 +5766,15 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                goto done;
        }
 
-       if ((hdev && opcode < MGMT_OP_READ_INFO) ||
-           (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+       if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
+                    opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
+               err = cmd_status(sk, index, opcode,
+                                MGMT_STATUS_INVALID_INDEX);
+               goto done;
+       }
+
+       if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
+                     opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
                err = cmd_status(sk, index, opcode,
                                 MGMT_STATUS_INVALID_INDEX);
                goto done;
@@ -4947,7 +5813,13 @@ void mgmt_index_added(struct hci_dev *hdev)
        if (hdev->dev_type != HCI_BREDR)
                return;
 
-       mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+               return;
+
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
+       else
+               mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
 void mgmt_index_removed(struct hci_dev *hdev)
@@ -4957,20 +5829,41 @@ void mgmt_index_removed(struct hci_dev *hdev)
        if (hdev->dev_type != HCI_BREDR)
                return;
 
+       if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+               return;
+
        mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
-       mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
+       if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
+               mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
+       else
+               mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
 /* This function requires the caller holds hdev->lock */
-static void restart_le_auto_conns(struct hci_dev *hdev)
+static void restart_le_actions(struct hci_dev *hdev)
 {
        struct hci_conn_params *p;
 
        list_for_each_entry(p, &hdev->le_conn_params, list) {
-               if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
-                       hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
+               /* Needed for AUTO_OFF case where might not "really"
+                * have been powered off.
+                */
+               list_del_init(&p->action);
+
+               switch (p->auto_connect) {
+               case HCI_AUTO_CONN_ALWAYS:
+                       list_add(&p->action, &hdev->pend_le_conns);
+                       break;
+               case HCI_AUTO_CONN_REPORT:
+                       list_add(&p->action, &hdev->pend_le_reports);
+                       break;
+               default:
+                       break;
+               }
        }
+
+       hci_update_background_scan(hdev);
 }
 
 static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -4981,7 +5874,7 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
 
        hci_dev_lock(hdev);
 
-       restart_le_auto_conns(hdev);
+       restart_le_actions(hdev);
 
        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
@@ -5190,6 +6083,14 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
        if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
                return;
 
+       /* If something else than mgmt changed the page scan state we
+        * can't differentiate this from a change triggered by adding
+        * the first element to the whitelist. Therefore, avoid
+        * incorrectly setting HCI_CONNECTABLE.
+        */
+       if (connectable && !list_empty(&hdev->whitelist))
+               return;
+
        if (connectable)
                changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
        else
@@ -5199,18 +6100,6 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
                new_settings(hdev, NULL);
 }
 
-void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
-{
-       /* Powering off may stop advertising - don't let that interfere */
-       if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-               return;
-
-       if (advertising)
-               set_bit(HCI_ADVERTISING, &hdev->dev_flags);
-       else
-               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
-}
-
 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
        u8 mgmt_err = mgmt_status(status);
@@ -5279,7 +6168,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        ev.key.ediv = key->ediv;
        ev.key.rand = key->rand;
 
-       if (key->type == HCI_SMP_LTK)
+       if (key->type == SMP_LTK)
                ev.key.master = 1;
 
        memcpy(ev.key.val, key->val, sizeof(key->val));
@@ -5347,6 +6236,27 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
        mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
 }
 
+void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                        u8 bdaddr_type, u8 store_hint, u16 min_interval,
+                        u16 max_interval, u16 latency, u16 timeout)
+{
+       struct mgmt_ev_new_conn_param ev;
+
+       if (!hci_is_identity_address(bdaddr, bdaddr_type))
+               return;
+
+       memset(&ev, 0, sizeof(ev));
+       bacpy(&ev.addr.bdaddr, bdaddr);
+       ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
+       ev.store_hint = store_hint;
+       ev.min_interval = cpu_to_le16(min_interval);
+       ev.max_interval = cpu_to_le16(max_interval);
+       ev.latency = cpu_to_le16(latency);
+       ev.timeout = cpu_to_le16(timeout);
+
+       mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
+}
+
 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
                                  u8 data_len)
 {
@@ -5765,10 +6675,14 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 
        hci_req_init(&req, hdev);
 
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+               if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
+                       hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
+                                   sizeof(enable), &enable);
                update_eir(&req);
-       else
+       } else {
                clear_eir(&req);
+       }
 
        hci_req_run(&req, NULL);
 }
@@ -5912,17 +6826,23 @@ void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
 }
 
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-                      u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
-                      u8 scan_rsp_len)
+                      u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
+                      u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
 {
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *) buf;
-       struct smp_irk *irk;
        size_t ev_size;
 
-       if (!hci_discovery_active(hdev))
-               return;
+       /* Don't send events for a non-kernel initiated discovery. With
+        * LE one exception is if we have pend_le_reports > 0 in which
+        * case we're doing passive scanning and want these events.
+        */
+       if (!hci_discovery_active(hdev)) {
+               if (link_type == ACL_LINK)
+                       return;
+               if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
+                       return;
+       }
 
        /* Make sure that the buffer is big enough. The 5 extra bytes
         * are for the potential CoD field.
@@ -5932,20 +6852,10 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        memset(buf, 0, sizeof(buf));
 
-       irk = hci_get_irk(hdev, bdaddr, addr_type);
-       if (irk) {
-               bacpy(&ev->addr.bdaddr, &irk->bdaddr);
-               ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
-       } else {
-               bacpy(&ev->addr.bdaddr, bdaddr);
-               ev->addr.type = link_to_bdaddr(link_type, addr_type);
-       }
-
+       bacpy(&ev->addr.bdaddr, bdaddr);
+       ev->addr.type = link_to_bdaddr(link_type, addr_type);
        ev->rssi = rssi;
-       if (cfm_name)
-               ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
-       if (!ssp)
-               ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
+       ev->flags = cpu_to_le32(flags);
 
        if (eir_len > 0)
                memcpy(ev->eir, eir, eir_len);
@@ -6013,63 +6923,19 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
        mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-       struct pending_cmd *cmd;
-       struct mgmt_ev_device_blocked ev;
-
-       cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
-
-       bacpy(&ev.addr.bdaddr, bdaddr);
-       ev.addr.type = type;
-
-       return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
-                         cmd ? cmd->sk : NULL);
-}
-
-int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
-{
-       struct pending_cmd *cmd;
-       struct mgmt_ev_device_unblocked ev;
-
-       cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
-
-       bacpy(&ev.addr.bdaddr, bdaddr);
-       ev.addr.type = type;
-
-       return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
-                         cmd ? cmd->sk : NULL);
-}
-
 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
 {
        BT_DBG("%s status %u", hdev->name, status);
-
-       /* Clear the advertising mgmt setting if we failed to re-enable it */
-       if (status) {
-               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
-               new_settings(hdev, NULL);
-       }
 }
 
 void mgmt_reenable_advertising(struct hci_dev *hdev)
 {
        struct hci_request req;
 
-       if (hci_conn_num(hdev, LE_LINK) > 0)
-               return;
-
        if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                return;
 
        hci_req_init(&req, hdev);
        enable_advertising(&req);
-
-       /* If this fails we have no option but to let user space know
-        * that we've disabled advertising.
-        */
-       if (hci_req_run(&req, adv_enable_complete) < 0) {
-               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
-               new_settings(hdev, NULL);
-       }
+       hci_req_run(&req, adv_enable_complete);
 }
index e33a982161c1db063b5cb96f06a10babb5f0b436..55c41de2f5a03c1a1f6de41ca722c7568d511b3d 100644 (file)
 
 #define AUTH_REQ_MASK   0x07
 
-#define SMP_FLAG_TK_VALID      1
-#define SMP_FLAG_CFM_PENDING   2
-#define SMP_FLAG_MITM_AUTH     3
-#define SMP_FLAG_COMPLETE      4
-#define SMP_FLAG_INITIATOR     5
+enum {
+       SMP_FLAG_TK_VALID,
+       SMP_FLAG_CFM_PENDING,
+       SMP_FLAG_MITM_AUTH,
+       SMP_FLAG_COMPLETE,
+       SMP_FLAG_INITIATOR,
+};
 
 struct smp_chan {
        struct l2cap_conn *conn;
@@ -60,20 +62,16 @@ struct smp_chan {
        struct smp_ltk  *slave_ltk;
        struct smp_irk  *remote_irk;
        unsigned long   flags;
+
+       struct crypto_blkcipher *tfm_aes;
 };
 
-static inline void swap128(const u8 src[16], u8 dst[16])
+static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
 {
-       int i;
-       for (i = 0; i < 16; i++)
-               dst[15 - i] = src[i];
-}
+       size_t i;
 
-static inline void swap56(const u8 src[7], u8 dst[7])
-{
-       int i;
-       for (i = 0; i < 7; i++)
-               dst[6 - i] = src[i];
+       for (i = 0; i < len; i++)
+               dst[len - 1 - i] = src[i];
 }
 
 static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
@@ -92,7 +90,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        desc.flags = 0;
 
        /* The most significant octet of key corresponds to k[0] */
-       swap128(k, tmp);
+       swap_buf(k, tmp, 16);
 
        err = crypto_blkcipher_setkey(tfm, tmp, 16);
        if (err) {
@@ -101,7 +99,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        }
 
        /* Most significant octet of plaintextData corresponds to data[0] */
-       swap128(r, data);
+       swap_buf(r, data, 16);
 
        sg_init_one(&sg, data, 16);
 
@@ -110,7 +108,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
                BT_ERR("Encrypt data error %d", err);
 
        /* Most significant octet of encryptedData corresponds to data[0] */
-       swap128(data, r);
+       swap_buf(data, r, 16);
 
        return err;
 }
@@ -174,13 +172,16 @@ int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
        return 0;
 }
 
-static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
-                 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
-                 u8 _rat, bdaddr_t *ra, u8 res[16])
+static int smp_c1(struct smp_chan *smp, u8 k[16], u8 r[16], u8 preq[7],
+                 u8 pres[7], u8 _iat, bdaddr_t *ia, u8 _rat, bdaddr_t *ra,
+                 u8 res[16])
 {
+       struct hci_dev *hdev = smp->conn->hcon->hdev;
        u8 p1[16], p2[16];
        int err;
 
+       BT_DBG("%s", hdev->name);
+
        memset(p1, 0, 16);
 
        /* p1 = pres || preq || _rat || _iat */
@@ -198,7 +199,7 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
        u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
 
        /* res = e(k, res) */
-       err = smp_e(tfm, k, res);
+       err = smp_e(smp->tfm_aes, k, res);
        if (err) {
                BT_ERR("Encrypt data error");
                return err;
@@ -208,23 +209,26 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
        u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
 
        /* res = e(k, res) */
-       err = smp_e(tfm, k, res);
+       err = smp_e(smp->tfm_aes, k, res);
        if (err)
                BT_ERR("Encrypt data error");
 
        return err;
 }
 
-static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
-                 u8 r2[16], u8 _r[16])
+static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16],
+                 u8 _r[16])
 {
+       struct hci_dev *hdev = smp->conn->hcon->hdev;
        int err;
 
+       BT_DBG("%s", hdev->name);
+
        /* Just least significant octets from r1 and r2 are considered */
        memcpy(_r, r2, 8);
        memcpy(_r + 8, r1, 8);
 
-       err = smp_e(tfm, k, _r);
+       err = smp_e(smp->tfm_aes, k, _r);
        if (err)
                BT_ERR("Encrypt data error");
 
@@ -439,7 +443,7 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
         * Confirms and the slave Enters the passkey.
         */
        if (method == OVERLAP) {
-               if (hcon->link_mode & HCI_LM_MASTER)
+               if (test_bit(HCI_CONN_MASTER, &hcon->flags))
                        method = CFM_PASSKEY;
                else
                        method = REQ_PASSKEY;
@@ -477,23 +481,15 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
 static u8 smp_confirm(struct smp_chan *smp)
 {
        struct l2cap_conn *conn = smp->conn;
-       struct hci_dev *hdev = conn->hcon->hdev;
-       struct crypto_blkcipher *tfm = hdev->tfm_aes;
        struct smp_cmd_pairing_confirm cp;
        int ret;
 
        BT_DBG("conn %p", conn);
 
-       /* Prevent mutual access to hdev->tfm_aes */
-       hci_dev_lock(hdev);
-
-       ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+       ret = smp_c1(smp, smp->tk, smp->prnd, smp->preq, smp->prsp,
                     conn->hcon->init_addr_type, &conn->hcon->init_addr,
                     conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
                     cp.confirm_val);
-
-       hci_dev_unlock(hdev);
-
        if (ret)
                return SMP_UNSPECIFIED;
 
@@ -508,25 +504,17 @@ static u8 smp_random(struct smp_chan *smp)
 {
        struct l2cap_conn *conn = smp->conn;
        struct hci_conn *hcon = conn->hcon;
-       struct hci_dev *hdev = hcon->hdev;
-       struct crypto_blkcipher *tfm = hdev->tfm_aes;
        u8 confirm[16];
        int ret;
 
-       if (IS_ERR_OR_NULL(tfm))
+       if (IS_ERR_OR_NULL(smp->tfm_aes))
                return SMP_UNSPECIFIED;
 
        BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
 
-       /* Prevent mutual access to hdev->tfm_aes */
-       hci_dev_lock(hdev);
-
-       ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+       ret = smp_c1(smp, smp->tk, smp->rrnd, smp->preq, smp->prsp,
                     hcon->init_addr_type, &hcon->init_addr,
                     hcon->resp_addr_type, &hcon->resp_addr, confirm);
-
-       hci_dev_unlock(hdev);
-
        if (ret)
                return SMP_UNSPECIFIED;
 
@@ -540,7 +528,7 @@ static u8 smp_random(struct smp_chan *smp)
                __le64 rand = 0;
                __le16 ediv = 0;
 
-               smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk);
+               smp_s1(smp, smp->tk, smp->rrnd, smp->prnd, stk);
 
                memset(stk + smp->enc_key_size, 0,
                       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -550,6 +538,7 @@ static u8 smp_random(struct smp_chan *smp)
 
                hci_le_start_enc(hcon, ediv, rand, stk);
                hcon->enc_key_size = smp->enc_key_size;
+               set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
        } else {
                u8 stk[16], auth;
                __le64 rand = 0;
@@ -558,7 +547,7 @@ static u8 smp_random(struct smp_chan *smp)
                smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
                             smp->prnd);
 
-               smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk);
+               smp_s1(smp, smp->tk, smp->prnd, smp->rrnd, stk);
 
                memset(stk + smp->enc_key_size, 0,
                       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -568,9 +557,12 @@ static u8 smp_random(struct smp_chan *smp)
                else
                        auth = 0;
 
+               /* Even though there's no _SLAVE suffix this is the
+                * slave STK we're adding for later lookup (the master
+                * STK never needs to be stored).
+                */
                hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-                           HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
-                           ediv, rand);
+                           SMP_STK, auth, stk, smp->enc_key_size, ediv, rand);
        }
 
        return 0;
@@ -584,9 +576,15 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
        if (!smp)
                return NULL;
 
+       smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(smp->tfm_aes)) {
+               BT_ERR("Unable to create ECB crypto context");
+               kfree(smp);
+               return NULL;
+       }
+
        smp->conn = conn;
        conn->smp_chan = smp;
-       conn->hcon->smp_conn = conn;
 
        hci_conn_hold(conn->hcon);
 
@@ -606,6 +604,8 @@ void smp_chan_destroy(struct l2cap_conn *conn)
        kfree(smp->csrk);
        kfree(smp->slave_csrk);
 
+       crypto_free_blkcipher(smp->tfm_aes);
+
        /* If pairing failed clean up any keys we might have */
        if (!complete) {
                if (smp->ltk) {
@@ -626,19 +626,18 @@ void smp_chan_destroy(struct l2cap_conn *conn)
 
        kfree(smp);
        conn->smp_chan = NULL;
-       conn->hcon->smp_conn = NULL;
        hci_conn_drop(conn->hcon);
 }
 
 int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
 {
-       struct l2cap_conn *conn = hcon->smp_conn;
+       struct l2cap_conn *conn = hcon->l2cap_data;
        struct smp_chan *smp;
        u32 value;
 
        BT_DBG("");
 
-       if (!conn)
+       if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
                return -ENOTCONN;
 
        smp = conn->smp_chan;
@@ -684,7 +683,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (skb->len < sizeof(*req))
                return SMP_INVALID_PARAMS;
 
-       if (conn->hcon->link_mode & HCI_LM_MASTER)
+       if (test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
                return SMP_CMD_NOTSUPP;
 
        if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
@@ -751,7 +750,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        if (skb->len < sizeof(*rsp))
                return SMP_INVALID_PARAMS;
 
-       if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+       if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
                return SMP_CMD_NOTSUPP;
 
        skb_pull(skb, sizeof(*rsp));
@@ -839,7 +838,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
        return smp_random(smp);
 }
 
-static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
+static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 {
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
@@ -847,18 +846,40 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
        key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
                                   hcon->out);
        if (!key)
-               return 0;
+               return false;
 
        if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
-               return 0;
+               return false;
 
        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
-               return 1;
+               return true;
 
        hci_le_start_enc(hcon, key->ediv, key->rand, key->val);
        hcon->enc_key_size = key->enc_size;
 
-       return 1;
+       /* We never store STKs for master role, so clear this flag */
+       clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags);
+
+       return true;
+}
+
+bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
+{
+       if (sec_level == BT_SECURITY_LOW)
+               return true;
+
+       /* If we're encrypted with an STK always claim insufficient
+        * security. This way we allow the connection to be re-encrypted
+        * with an LTK, even if the LTK provides the same level of
+        * security.
+        */
+       if (test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags))
+               return false;
+
+       if (hcon->sec_level >= sec_level)
+               return true;
+
+       return false;
 }
 
 static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -874,10 +895,13 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (skb->len < sizeof(*rp))
                return SMP_INVALID_PARAMS;
 
-       if (!(conn->hcon->link_mode & HCI_LM_MASTER))
+       if (!test_bit(HCI_CONN_MASTER, &conn->hcon->flags))
                return SMP_CMD_NOTSUPP;
 
        sec_level = authreq_to_seclevel(rp->auth_req);
+       if (smp_sufficient_security(hcon, sec_level))
+               return 0;
+
        if (sec_level > hcon->pending_sec_level)
                hcon->pending_sec_level = sec_level;
 
@@ -888,6 +912,8 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
                return 0;
 
        smp = smp_chan_create(conn);
+       if (!smp)
+               return SMP_UNSPECIFIED;
 
        skb_pull(skb, sizeof(*rp));
 
@@ -904,17 +930,6 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        return 0;
 }
 
-bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
-{
-       if (sec_level == BT_SECURITY_LOW)
-               return true;
-
-       if (hcon->sec_level >= sec_level)
-               return true;
-
-       return false;
-}
-
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 {
        struct l2cap_conn *conn = hcon->l2cap_data;
@@ -936,7 +951,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
        if (sec_level > hcon->pending_sec_level)
                hcon->pending_sec_level = sec_level;
 
-       if (hcon->link_mode & HCI_LM_MASTER)
+       if (test_bit(HCI_CONN_MASTER, &hcon->flags))
                if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                        return 0;
 
@@ -956,7 +971,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
            hcon->pending_sec_level > BT_SECURITY_MEDIUM)
                authreq |= SMP_AUTH_MITM;
 
-       if (hcon->link_mode & HCI_LM_MASTER) {
+       if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
                struct smp_cmd_pairing cp;
 
                build_pairing_cmd(conn, &cp, NULL, authreq);
@@ -1021,7 +1036,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
        authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
-       ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK,
+       ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK,
                          authenticated, smp->tk, smp->enc_key_size,
                          rp->ediv, rp->rand);
        smp->ltk = ltk;
@@ -1075,6 +1090,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
 
        skb_pull(skb, sizeof(*info));
 
+       hci_dev_lock(hcon->hdev);
+
        /* Strictly speaking the Core Specification (4.1) allows sending
         * an empty address which would force us to rely on just the IRK
         * as "identity information". However, since such
@@ -1084,8 +1101,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
         */
        if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
                BT_ERR("Ignoring IRK with no identity address");
-               smp_distribute_keys(conn);
-               return 0;
+               goto distribute;
        }
 
        bacpy(&smp->id_addr, &info->bdaddr);
@@ -1099,8 +1115,11 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
        smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr,
                                      smp->id_addr_type, smp->irk, &rpa);
 
+distribute:
        smp_distribute_keys(conn);
 
+       hci_dev_unlock(hcon->hdev);
+
        return 0;
 }
 
@@ -1337,7 +1356,7 @@ int smp_distribute_keys(struct l2cap_conn *conn)
 
                authenticated = hcon->sec_level == BT_SECURITY_HIGH;
                ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
-                                 HCI_SMP_LTK_SLAVE, authenticated, enc.ltk,
+                                 SMP_LTK_SLAVE, authenticated, enc.ltk,
                                  smp->enc_key_size, ediv, rand);
                smp->slave_ltk = ltk;
 
index 5a8dc36460a1b7e3a61b08d18407708673a7d1a5..796f4f45f92f67e43e32d75bfb015b5175f2aba0 100644 (file)
@@ -116,6 +116,13 @@ struct smp_cmd_security_req {
 #define SMP_MIN_ENC_KEY_SIZE           7
 #define SMP_MAX_ENC_KEY_SIZE           16
 
+/* LTK types used in internal storage (struct smp_ltk) */
+enum {
+       SMP_STK,
+       SMP_LTK,
+       SMP_LTK_SLAVE,
+};
+
 /* SMP Commands */
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index b524c36c12731b6c84a7587125fdf2db6417c830..0bb9d8b63dd2282ff62d9c292e4edac6af88d4f0 100644 (file)
@@ -93,7 +93,7 @@ static void fdb_rcu_free(struct rcu_head *head)
 static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
 {
        int err;
-       struct net_bridge_port *p, *tmp;
+       struct net_bridge_port *p;
 
        ASSERT_RTNL();
 
@@ -107,11 +107,9 @@ static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
 
        return;
 undo:
-       list_for_each_entry(tmp, &br->port_list, list) {
-               if (tmp == p)
-                       break;
-               if (!br_promisc_port(tmp))
-                       dev_uc_del(tmp->dev, addr);
+       list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+               if (!br_promisc_port(p))
+                       dev_uc_del(p->dev, addr);
        }
 }
 
@@ -678,6 +676,7 @@ errout:
 int br_fdb_dump(struct sk_buff *skb,
                struct netlink_callback *cb,
                struct net_device *dev,
+               struct net_device *filter_dev,
                int idx)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -693,6 +692,19 @@ int br_fdb_dump(struct sk_buff *skb,
                        if (idx < cb->args[0])
                                goto skip;
 
+                       if (filter_dev &&
+                           (!f->dst || f->dst->dev != filter_dev)) {
+                               if (filter_dev != dev)
+                                       goto skip;
+                               /* !f->dst is a speacial case for bridge
+                                * It means the MAC belongs to the bridge
+                                * Therefore need a little more filtering
+                                * we only want to dump the !f->dst case
+                                */
+                               if (f->dst)
+                                       goto skip;
+                       }
+
                        if (fdb_fill_info(skb, br, f,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq,
index 3eca3fdf8fe1c7563150f735991e66380c4312ca..078d336a1f379f80d622e4fffa187630593827cf 100644 (file)
@@ -344,7 +344,7 @@ int br_add_bridge(struct net *net, const char *name)
        struct net_device *dev;
        int res;
 
-       dev = alloc_netdev(sizeof(struct net_bridge), name,
+       dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
                           br_dev_setup);
 
        if (!dev)
index abfa0b65a1118eb0abae4dd69f78e7285537acd8..b4845f4b2bb414c7403e225528add6840eb26f74 100644 (file)
@@ -2215,6 +2215,43 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
 
+/**
+ * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
+ * @dev: The bridge port providing the bridge on which to check for a querier
+ * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
+ *
+ * Checks whether the given interface has a bridge on top and if so returns
+ * true if a valid querier exists anywhere on the bridged link layer.
+ * Otherwise returns false.
+ */
+bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
+{
+       struct net_bridge *br;
+       struct net_bridge_port *port;
+       struct ethhdr eth;
+       bool ret = false;
+
+       rcu_read_lock();
+       if (!br_port_exists(dev))
+               goto unlock;
+
+       port = br_port_get_rcu(dev);
+       if (!port || !port->br)
+               goto unlock;
+
+       br = port->br;
+
+       memset(&eth, 0, sizeof(eth));
+       eth.h_proto = htons(proto);
+
+       ret = br_multicast_querier_exists(br, &eth);
+
+unlock:
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
+
 /**
  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
  * @dev: The bridge port adjacent to which to check for a querier
index 23caf5b0309efe9e37b5d2b7bace5e1a9a75bf82..62a7fa2e356921d27729d365627d490dbf8a6126 100644 (file)
@@ -399,7 +399,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
               const unsigned char *addr, u16 nlh_flags);
 int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct net_device *dev, int idx);
+               struct net_device *dev, struct net_device *fdev, int idx);
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 
index 629dc77874a9975feed56bb7b6abefad76cfad94..4ce0b313f72c9a47c0f300fbd91f03e31683a55d 100644 (file)
@@ -14,6 +14,9 @@ config NFT_BRIDGE_META
        help
          Add support for bridge dedicated meta key.
 
+config NF_LOG_BRIDGE
+       tristate "Bridge packet logging"
+
 endif # NF_TABLES_BRIDGE
 
 menuconfig BRIDGE_NF_EBTABLES
@@ -202,22 +205,6 @@ config BRIDGE_EBT_LOG
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config BRIDGE_EBT_ULOG
-       tristate "ebt: ulog support (OBSOLETE)"
-       help
-         This option enables the old bridge-specific "ebt_ulog" implementation
-         which has been obsoleted by the new "nfnetlink_log" code (see
-         CONFIG_NETFILTER_NETLINK_LOG).
-
-         This option adds the ulog watcher, that you can use in any rule
-         in any ebtables table. The packet is passed to a userspace
-         logging daemon using netlink multicast sockets. This differs
-         from the log watcher in the sense that the complete packet is
-         sent to userspace instead of a descriptive text and that
-         netlink multicast sockets are used instead of the syslog.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config BRIDGE_EBT_NFLOG
        tristate "ebt: nflog support"
        help
index 6f2f3943d66f34b43c72be21b603bbf51ba0d289..1f78ea0d90e40c23e750f3cd3c644b2fccc1bf0a 100644 (file)
@@ -5,6 +5,9 @@
 obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
 obj-$(CONFIG_NFT_BRIDGE_META)  += nft_meta_bridge.o
 
+# packet logging
+obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
 # tables
index 5322a36867a314c796bc8ff20f095fa217aea1f3..17f2e4bc2a29fcbb6d40dc408d5472fb67c5721c 100644 (file)
@@ -186,6 +186,10 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
        li.u.log.level = info->loglevel;
        li.u.log.logflags = info->bitmask;
 
+       /* Remember that we have to use ebt_log_packet() not to break backward
+        * compatibility. We cannot use the default bridge packet logger via
+        * nf_log_packet() with NFT_LOG_TYPE_LOG here. --Pablo
+        */
        if (info->bitmask & EBT_LOG_NFLOG)
                nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
                              par->in, par->out, &li, "%s", info->prefix);
@@ -205,54 +209,13 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
        .me             = THIS_MODULE,
 };
 
-static struct nf_logger ebt_log_logger __read_mostly = {
-       .name           = "ebt_log",
-       .logfn          = &ebt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ebt_log_net_init(struct net *net)
-{
-       nf_log_set(net, NFPROTO_BRIDGE, &ebt_log_logger);
-       return 0;
-}
-
-static void __net_exit ebt_log_net_fini(struct net *net)
-{
-       nf_log_unset(net, &ebt_log_logger);
-}
-
-static struct pernet_operations ebt_log_net_ops = {
-       .init = ebt_log_net_init,
-       .exit = ebt_log_net_fini,
-};
-
 static int __init ebt_log_init(void)
 {
-       int ret;
-
-       ret = register_pernet_subsys(&ebt_log_net_ops);
-       if (ret < 0)
-               goto err_pernet;
-
-       ret = xt_register_target(&ebt_log_tg_reg);
-       if (ret < 0)
-               goto err_target;
-
-       nf_log_register(NFPROTO_BRIDGE, &ebt_log_logger);
-
-       return ret;
-
-err_target:
-       unregister_pernet_subsys(&ebt_log_net_ops);
-err_pernet:
-       return ret;
+       return xt_register_target(&ebt_log_tg_reg);
 }
 
 static void __exit ebt_log_fini(void)
 {
-       unregister_pernet_subsys(&ebt_log_net_ops);
-       nf_log_unregister(&ebt_log_logger);
        xt_unregister_target(&ebt_log_tg_reg);
 }
 
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
deleted file mode 100644 (file)
index 7c470c3..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
- * netfilter module for userspace bridged Ethernet frames logging daemons
- *
- *     Authors:
- *     Bart De Schuymer <bdschuym@pandora.be>
- *     Harald Welte <laforge@netfilter.org>
- *
- *  November, 2004
- *
- * Based on ipt_ULOG.c, which is
- * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
- *
- * This module accepts two parameters:
- *
- * nlbufsiz:
- *   The parameter specifies how big the buffer for each netlink multicast
- * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
- * get accumulated in the kernel until they are sent to userspace. It is
- * NOT possible to allocate more than 128kB, and it is strongly discouraged,
- * because atomically allocating 128kB inside the network rx softirq is not
- * reliable. Please also keep in mind that this buffer size is allocated for
- * each nlgroup you are using, so the total kernel memory usage increases
- * by that factor.
- *
- * flushtimeout:
- *   Specify, after how many hundredths of a second the queue should be
- *   flushed even if it is not full yet.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <net/netlink.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_bridge/ebtables.h>
-#include <linux/netfilter_bridge/ebt_ulog.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netns/generic.h>
-#include <net/sock.h>
-#include "../br_private.h"
-
-static unsigned int nlbufsiz = NLMSG_GOODSIZE;
-module_param(nlbufsiz, uint, 0600);
-MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
-                          "(defaults to 4096)");
-
-static unsigned int flushtimeout = 10;
-module_param(flushtimeout, uint, 0600);
-MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths ofa second) "
-                              "(defaults to 10)");
-
-typedef struct {
-       unsigned int qlen;              /* number of nlmsgs' in the skb */
-       struct nlmsghdr *lastnlh;       /* netlink header of last msg in skb */
-       struct sk_buff *skb;            /* the pre-allocated skb */
-       struct timer_list timer;        /* the timer function */
-       spinlock_t lock;                /* the per-queue lock */
-} ebt_ulog_buff_t;
-
-static int ebt_ulog_net_id __read_mostly;
-struct ebt_ulog_net {
-       unsigned int nlgroup[EBT_ULOG_MAXNLGROUPS];
-       ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
-       struct sock *ebtulognl;
-};
-
-static struct ebt_ulog_net *ebt_ulog_pernet(struct net *net)
-{
-       return net_generic(net, ebt_ulog_net_id);
-}
-
-/* send one ulog_buff_t to userspace */
-static void ulog_send(struct ebt_ulog_net *ebt, unsigned int nlgroup)
-{
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[nlgroup];
-
-       del_timer(&ub->timer);
-
-       if (!ub->skb)
-               return;
-
-       /* last nlmsg needs NLMSG_DONE */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_type = NLMSG_DONE;
-
-       NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
-       netlink_broadcast(ebt->ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
-
-       ub->qlen = 0;
-       ub->skb = NULL;
-}
-
-/* timer function to flush queue in flushtimeout time */
-static void ulog_timer(unsigned long data)
-{
-       struct ebt_ulog_net *ebt = container_of((void *)data,
-                                               struct ebt_ulog_net,
-                                               nlgroup[*(unsigned int *)data]);
-
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[*(unsigned int *)data];
-       spin_lock_bh(&ub->lock);
-       if (ub->skb)
-               ulog_send(ebt, *(unsigned int *)data);
-       spin_unlock_bh(&ub->lock);
-}
-
-static struct sk_buff *ulog_alloc_skb(unsigned int size)
-{
-       struct sk_buff *skb;
-       unsigned int n;
-
-       n = max(size, nlbufsiz);
-       skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
-       if (!skb) {
-               if (n > size) {
-                       /* try to allocate only as much as we need for
-                        * current packet */
-                       skb = alloc_skb(size, GFP_ATOMIC);
-                       if (!skb)
-                               pr_debug("cannot even allocate buffer of size %ub\n",
-                                        size);
-               }
-       }
-
-       return skb;
-}
-
-static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
-                           const struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           const struct ebt_ulog_info *uloginfo,
-                           const char *prefix)
-{
-       ebt_ulog_packet_msg_t *pm;
-       size_t size, copy_len;
-       struct nlmsghdr *nlh;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-       unsigned int group = uloginfo->nlgroup;
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
-       spinlock_t *lock = &ub->lock;
-       ktime_t kt;
-
-       if ((uloginfo->cprange == 0) ||
-           (uloginfo->cprange > skb->len + ETH_HLEN))
-               copy_len = skb->len + ETH_HLEN;
-       else
-               copy_len = uloginfo->cprange;
-
-       size = nlmsg_total_size(sizeof(*pm) + copy_len);
-       if (size > nlbufsiz) {
-               pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
-               return;
-       }
-
-       spin_lock_bh(lock);
-
-       if (!ub->skb) {
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto unlock;
-       } else if (size > skb_tailroom(ub->skb)) {
-               ulog_send(ebt, group);
-
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto unlock;
-       }
-
-       nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
-                       size - NLMSG_ALIGN(sizeof(*nlh)), 0);
-       if (!nlh) {
-               kfree_skb(ub->skb);
-               ub->skb = NULL;
-               goto unlock;
-       }
-       ub->qlen++;
-
-       pm = nlmsg_data(nlh);
-       memset(pm, 0, sizeof(*pm));
-
-       /* Fill in the ulog data */
-       pm->version = EBT_ULOG_VERSION;
-       kt = ktime_get_real();
-       pm->stamp = ktime_to_timeval(kt);
-       if (ub->qlen == 1)
-               ub->skb->tstamp = kt;
-       pm->data_len = copy_len;
-       pm->mark = skb->mark;
-       pm->hook = hooknr;
-       if (uloginfo->prefix != NULL)
-               strcpy(pm->prefix, uloginfo->prefix);
-
-       if (in) {
-               strcpy(pm->physindev, in->name);
-               /* If in isn't a bridge, then physindev==indev */
-               if (br_port_exists(in))
-                       /* rcu_read_lock()ed by nf_hook_slow */
-                       strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
-               else
-                       strcpy(pm->indev, in->name);
-       }
-
-       if (out) {
-               /* If out exists, then out is a bridge port */
-               strcpy(pm->physoutdev, out->name);
-               /* rcu_read_lock()ed by nf_hook_slow */
-               strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-       }
-
-       if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
-               BUG();
-
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
-
-       ub->lastnlh = nlh;
-
-       if (ub->qlen >= uloginfo->qthreshold)
-               ulog_send(ebt, group);
-       else if (!timer_pending(&ub->timer)) {
-               ub->timer.expires = jiffies + flushtimeout * HZ / 100;
-               add_timer(&ub->timer);
-       }
-
-unlock:
-       spin_unlock_bh(lock);
-}
-
-/* this function is registered with the netfilter core */
-static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
-   const struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, const struct nf_loginfo *li,
-   const char *prefix)
-{
-       struct ebt_ulog_info loginfo;
-
-       if (!li || li->type != NF_LOG_TYPE_ULOG) {
-               loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP;
-               loginfo.cprange = 0;
-               loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD;
-               loginfo.prefix[0] = '\0';
-       } else {
-               loginfo.nlgroup = li->u.ulog.group;
-               loginfo.cprange = li->u.ulog.copy_len;
-               loginfo.qthreshold = li->u.ulog.qthreshold;
-               strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
-       }
-
-       ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
-}
-
-static unsigned int
-ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-
-       ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
-                       par->targinfo, NULL);
-       return EBT_CONTINUE;
-}
-
-static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
-{
-       struct ebt_ulog_info *uloginfo = par->targinfo;
-
-       if (!par->net->xt.ebt_ulog_warn_deprecated) {
-               pr_info("ebt_ulog is deprecated and it will be removed soon, "
-                       "use ebt_nflog instead\n");
-               par->net->xt.ebt_ulog_warn_deprecated = true;
-       }
-
-       if (uloginfo->nlgroup > 31)
-               return -EINVAL;
-
-       uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
-
-       if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
-               uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
-
-       return 0;
-}
-
-static struct xt_target ebt_ulog_tg_reg __read_mostly = {
-       .name           = "ulog",
-       .revision       = 0,
-       .family         = NFPROTO_BRIDGE,
-       .target         = ebt_ulog_tg,
-       .checkentry     = ebt_ulog_tg_check,
-       .targetsize     = sizeof(struct ebt_ulog_info),
-       .me             = THIS_MODULE,
-};
-
-static struct nf_logger ebt_ulog_logger __read_mostly = {
-       .name           = "ebt_ulog",
-       .logfn          = &ebt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ebt_ulog_net_init(struct net *net)
-{
-       int i;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-
-       struct netlink_kernel_cfg cfg = {
-               .groups = EBT_ULOG_MAXNLGROUPS,
-       };
-
-       /* initialize ulog_buffers */
-       for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
-               ebt->nlgroup[i] = i;
-               setup_timer(&ebt->ulog_buffers[i].timer, ulog_timer,
-                           (unsigned long)&ebt->nlgroup[i]);
-               spin_lock_init(&ebt->ulog_buffers[i].lock);
-       }
-
-       ebt->ebtulognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
-       if (!ebt->ebtulognl)
-               return -ENOMEM;
-
-       nf_log_set(net, NFPROTO_BRIDGE, &ebt_ulog_logger);
-       return 0;
-}
-
-static void __net_exit ebt_ulog_net_fini(struct net *net)
-{
-       int i;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-
-       nf_log_unset(net, &ebt_ulog_logger);
-       for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
-               ebt_ulog_buff_t *ub = &ebt->ulog_buffers[i];
-               del_timer(&ub->timer);
-
-               if (ub->skb) {
-                       kfree_skb(ub->skb);
-                       ub->skb = NULL;
-               }
-       }
-       netlink_kernel_release(ebt->ebtulognl);
-}
-
-static struct pernet_operations ebt_ulog_net_ops = {
-       .init = ebt_ulog_net_init,
-       .exit = ebt_ulog_net_fini,
-       .id   = &ebt_ulog_net_id,
-       .size = sizeof(struct ebt_ulog_net),
-};
-
-static int __init ebt_ulog_init(void)
-{
-       int ret;
-
-       if (nlbufsiz >= 128*1024) {
-               pr_warn("Netlink buffer has to be <= 128kB,"
-                       "please try a smaller nlbufsiz parameter.\n");
-               return -EINVAL;
-       }
-
-       ret = register_pernet_subsys(&ebt_ulog_net_ops);
-       if (ret)
-               goto out_pernet;
-
-       ret = xt_register_target(&ebt_ulog_tg_reg);
-       if (ret)
-               goto out_target;
-
-       nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
-
-       return 0;
-
-out_target:
-       unregister_pernet_subsys(&ebt_ulog_net_ops);
-out_pernet:
-       return ret;
-}
-
-static void __exit ebt_ulog_fini(void)
-{
-       nf_log_unregister(&ebt_ulog_logger);
-       xt_unregister_target(&ebt_ulog_tg_reg);
-       unregister_pernet_subsys(&ebt_ulog_net_ops);
-}
-
-module_init(ebt_ulog_init);
-module_exit(ebt_ulog_fini);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
diff --git a/net/bridge/netfilter/nf_log_bridge.c b/net/bridge/netfilter/nf_log_bridge.c
new file mode 100644 (file)
index 0000000..5d9953a
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_bridge.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_log.h>
+
+static void nf_log_bridge_packet(struct net *net, u_int8_t pf,
+                                unsigned int hooknum,
+                                const struct sk_buff *skb,
+                                const struct net_device *in,
+                                const struct net_device *out,
+                                const struct nf_loginfo *loginfo,
+                                const char *prefix)
+{
+       switch (eth_hdr(skb)->h_proto) {
+       case htons(ETH_P_IP):
+               nf_log_packet(net, NFPROTO_IPV4, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       case htons(ETH_P_IPV6):
+               nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_RARP):
+               nf_log_packet(net, NFPROTO_ARP, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       }
+}
+
+static struct nf_logger nf_bridge_logger __read_mostly = {
+       .name           = "nf_log_bridge",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_bridge_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_bridge_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_bridge_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_bridge_logger);
+}
+
+static struct pernet_operations nf_log_bridge_net_ops = {
+       .init = nf_log_bridge_net_init,
+       .exit = nf_log_bridge_net_exit,
+};
+
+static int __init nf_log_bridge_init(void)
+{
+       int ret;
+
+       /* Request to load the real packet loggers. */
+       nf_logger_request_module(NFPROTO_IPV4, NF_LOG_TYPE_LOG);
+       nf_logger_request_module(NFPROTO_IPV6, NF_LOG_TYPE_LOG);
+       nf_logger_request_module(NFPROTO_ARP, NF_LOG_TYPE_LOG);
+
+       ret = register_pernet_subsys(&nf_log_bridge_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
+       return 0;
+}
+
+static void __exit nf_log_bridge_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_bridge_net_ops);
+       nf_log_unregister(&nf_bridge_logger);
+}
+
+module_init(nf_log_bridge_init);
+module_exit(nf_log_bridge_exit);
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter bridge packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);
index e8437094d15fc8a7aa342969f9c6ebd7afeb740a..43f750e88e199e451d7de049255257b46cd947ab 100644 (file)
@@ -908,8 +908,7 @@ static int caif_release(struct socket *sock)
        sock->sk = NULL;
 
        WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
-       if (cf_sk->debugfs_socket_dir != NULL)
-               debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+       debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
 
        lock_sock(&(cf_sk->sk));
        sk->sk_state = CAIF_DISCONNECTED;
index 0f455227da8320bdcfe42daa61673701b25224fa..f5afda1abc76fe908e8cce160ad032870972424c 100644 (file)
@@ -547,7 +547,6 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
        default:
                pr_err("Unrecognized Control Frame\n");
                goto error;
-               break;
        }
        ret = 0;
 error:
index 367a586d0c8a851ad0b5ea57fab32dfe894d662c..e1b7cfaccd65243fea705868e6a7f8d784acef72 100644 (file)
@@ -1085,6 +1085,7 @@ static int dev_get_valid_name(struct net *net,
  */
 int dev_change_name(struct net_device *dev, const char *newname)
 {
+       unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
@@ -1112,10 +1113,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
                return err;
        }
 
+       if (oldname[0] && !strchr(oldname, '%'))
+               netdev_info(dev, "renamed from %s\n", oldname);
+
+       old_assign_type = dev->name_assign_type;
+       dev->name_assign_type = NET_NAME_RENAMED;
+
 rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
+               dev->name_assign_type = old_assign_type;
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }
@@ -1144,6 +1152,8 @@ rollback:
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
+                       dev->name_assign_type = old_assign_type;
+                       old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
@@ -2414,8 +2424,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
                skb_warn_bad_offload(skb);
 
-               if (skb_header_cloned(skb) &&
-                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
                        return ERR_PTR(err);
        }
 
@@ -2745,8 +2755,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
-        * This permits __QDISC_STATE_RUNNING owner to get the lock more often
-        * and dequeue packets faster.
+        * This permits __QDISC___STATE_RUNNING owner to get the lock more
+        * often and dequeue packets faster.
         */
        contended = qdisc_is_running(q);
        if (unlikely(contended))
@@ -5440,13 +5450,9 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
         */
 
        ret = 0;
-       if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
+       if ((old_flags ^ flags) & IFF_UP)
                ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
 
-               if (!ret)
-                       dev_set_rx_mode(dev);
-       }
-
        if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
                unsigned int old_flags = dev->flags;
@@ -6446,17 +6452,19 @@ void netdev_freemem(struct net_device *dev)
 
 /**
  *     alloc_netdev_mqs - allocate network device
- *     @sizeof_priv:   size of private data to allocate space for
- *     @name:          device name format string
- *     @setup:         callback to initialize device
- *     @txqs:          the number of TX subqueues to allocate
- *     @rxqs:          the number of RX subqueues to allocate
+ *     @sizeof_priv:           size of private data to allocate space for
+ *     @name:                  device name format string
+ *     @name_assign_type:      origin of device name
+ *     @setup:                 callback to initialize device
+ *     @txqs:                  the number of TX subqueues to allocate
+ *     @rxqs:                  the number of RX subqueues to allocate
  *
  *     Allocates a struct net_device with private data area for driver use
  *     and performs basic initialization.  Also allocates subqueue structs
  *     for each queue on the device.
  */
 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+               unsigned char name_assign_type,
                void (*setup)(struct net_device *),
                unsigned int txqs, unsigned int rxqs)
 {
@@ -6535,6 +6543,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 #endif
 
        strcpy(dev->name, name);
+       dev->name_assign_type = name_assign_type;
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;
@@ -6946,12 +6955,14 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
        if (dev && dev->dev.parent) {
                r = dev_printk_emit(level[1] - '0',
                                    dev->dev.parent,
-                                   "%s %s %s: %pV",
+                                   "%s %s %s%s: %pV",
                                    dev_driver_string(dev->dev.parent),
                                    dev_name(dev->dev.parent),
-                                   netdev_name(dev), vaf);
+                                   netdev_name(dev), netdev_reg_state(dev),
+                                   vaf);
        } else if (dev) {
-               r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
+               r = printk("%s%s%s: %pV", level, netdev_name(dev),
+                          netdev_reg_state(dev), vaf);
        } else {
                r = printk("%s(NULL net_device): %pV", level, vaf);
        }
@@ -7103,7 +7114,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
        rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
-                       if (dev->rtnl_link_ops)
+                       if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
                                dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
                        else
                                unregister_netdevice_queue(dev, &dev_kill_list);
index e70301eb7a4a0472d1ade5bd3d318fe453bd2b8b..50f9a9db57924decc06d50296b77dcfe1ac5fcc9 100644 (file)
@@ -289,10 +289,8 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
        switch (info->genlhdr->cmd) {
        case NET_DM_CMD_START:
                return set_all_monitor_traces(TRACE_ON);
-               break;
        case NET_DM_CMD_STOP:
                return set_all_monitor_traces(TRACE_OFF);
-               break;
        }
 
        return -ENOTSUPP;
index 1dbf6462f766541a20db14a3917c9f0813e73652..b90ae7fb3b893da7d2e42bd293e460644456dc2f 100644 (file)
@@ -84,15 +84,6 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
        return NULL;
 }
 
-static inline void *load_pointer(const struct sk_buff *skb, int k,
-                                unsigned int size, void *buffer)
-{
-       if (k >= 0)
-               return skb_header_pointer(skb, k, size, buffer);
-
-       return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
 /**
  *     sk_filter - run a packet through a socket filter
  *     @sk: sock associated with &sk_buff
@@ -537,7 +528,7 @@ load_word:
                 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
                 */
 
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = get_unaligned_be32(ptr);
                        CONT;
@@ -547,7 +538,7 @@ load_word:
        LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
                off = IMM;
 load_half:
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = get_unaligned_be16(ptr);
                        CONT;
@@ -557,7 +548,7 @@ load_half:
        LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
                off = IMM;
 load_byte:
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
                if (likely(ptr != NULL)) {
                        BPF_R0 = *(u8 *)ptr;
                        CONT;
@@ -1094,7 +1085,7 @@ err:
  * a cell if not previously written, and we check all branches to be sure
  * a malicious user doesn't try to abuse us.
  */
-static int check_load_and_stores(struct sock_filter *filter, int flen)
+static int check_load_and_stores(const struct sock_filter *filter, int flen)
 {
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;
@@ -1227,7 +1218,7 @@ static bool chk_code_allowed(u16 code_to_probe)
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
+int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
 {
        bool anc_found;
        int pc;
@@ -1237,7 +1228,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 
        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
-               struct sock_filter *ftest = &filter[pc];
+               const struct sock_filter *ftest = &filter[pc];
 
                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
index 107ed12a5323ab20e796042ae671e7b60ac0b488..5f362c1d03322692da59509c7d594f72255330b8 100644 (file)
@@ -80,6 +80,8 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
+               __be32 flow_label;
+
 ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
@@ -89,6 +91,21 @@ ipv6:
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
+
+               flow_label = ip6_flowlabel(iph);
+               if (flow_label) {
+                       /* Awesome, IPv6 packet has a flow label so we can
+                        * use that to represent the ports without any
+                        * further dissection.
+                        */
+                       flow->n_proto = proto;
+                       flow->ip_proto = ip_proto;
+                       flow->ports = flow_label;
+                       flow->thoff = (u16)nhoff;
+
+                       return true;
+               }
+
                break;
        }
        case htons(ETH_P_8021AD):
@@ -175,6 +192,7 @@ ipv6:
                break;
        }
 
+       flow->n_proto = proto;
        flow->ip_proto = ip_proto;
        flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16) nhoff;
@@ -195,12 +213,33 @@ static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
        return jhash_3words(a, b, c, hashrnd);
 }
 
-static __always_inline u32 __flow_hash_1word(u32 a)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
 {
-       __flow_hash_secret_init();
-       return jhash_1word(a, hashrnd);
+       u32 hash;
+
+       /* get a consistent hash (same value on both flow directions) */
+       if (((__force u32)keys->dst < (__force u32)keys->src) ||
+           (((__force u32)keys->dst == (__force u32)keys->src) &&
+            ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
+               swap(keys->dst, keys->src);
+               swap(keys->port16[0], keys->port16[1]);
+       }
+
+       hash = __flow_hash_3words((__force u32)keys->dst,
+                                 (__force u32)keys->src,
+                                 (__force u32)keys->ports);
+       if (!hash)
+               hash = 1;
+
+       return hash;
 }
 
+u32 flow_hash_from_keys(struct flow_keys *keys)
+{
+       return __flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(flow_hash_from_keys);
+
 /*
  * __skb_get_hash: calculate a flow hash based on src/dst addresses
  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
@@ -210,7 +249,6 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
-       u32 hash;
 
        if (!skb_flow_dissect(skb, &keys))
                return;
@@ -218,21 +256,9 @@ void __skb_get_hash(struct sk_buff *skb)
        if (keys.ports)
                skb->l4_hash = 1;
 
-       /* get a consistent hash (same value on both flow directions) */
-       if (((__force u32)keys.dst < (__force u32)keys.src) ||
-           (((__force u32)keys.dst == (__force u32)keys.src) &&
-            ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
-               swap(keys.dst, keys.src);
-               swap(keys.port16[0], keys.port16[1]);
-       }
-
-       hash = __flow_hash_3words((__force u32)keys.dst,
-                                 (__force u32)keys.src,
-                                 (__force u32)keys.ports);
-       if (!hash)
-               hash = 1;
+       skb->sw_hash = 1;
 
-       skb->hash = hash;
+       skb->hash = __flow_hash_from_keys(&keys);
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -240,7 +266,7 @@ EXPORT_SYMBOL(__skb_get_hash);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues)
 {
        u32 hash;
@@ -260,13 +286,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                qcount = dev->tc_to_txq[tc].count;
        }
 
-       if (skb->sk && skb->sk->sk_hash)
-               hash = skb->sk->sk_hash;
-       else
-               hash = (__force u16) skb->protocol;
-       hash = __flow_hash_1word(hash);
-
-       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+       return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
@@ -338,17 +358,10 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
-                       else {
-                               u32 hash;
-                               if (skb->sk && skb->sk->sk_hash)
-                                       hash = skb->sk->sk_hash;
-                               else
-                                       hash = (__force u16) skb->protocol ^
-                                           skb->hash;
-                               hash = __flow_hash_1word(hash);
+                       else
                                queue_index = map->queues[
-                                   ((u64)hash * map->len) >> 32];
-                       }
+                                   ((u64)skb_get_hash(skb) * map->len) >> 32];
+
                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
index 1cac29ebb05ba5040c5ca0a9e90ff080a0b1ea76..7752f2ad49a5eacfbb686f7e4f5df30db69ba0a7 100644 (file)
@@ -112,6 +112,25 @@ NETDEVICE_SHOW_RO(ifindex, fmt_dec);
 NETDEVICE_SHOW_RO(type, fmt_dec);
 NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
+static ssize_t format_name_assign_type(const struct net_device *net, char *buf)
+{
+       return sprintf(buf, fmt_dec, net->name_assign_type);
+}
+
+static ssize_t name_assign_type_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       struct net_device *net = to_net_dev(dev);
+       ssize_t ret = -EINVAL;
+
+       if (net->name_assign_type != NET_NAME_UNKNOWN)
+               ret = netdev_show(dev, attr, buf, format_name_assign_type);
+
+       return ret;
+}
+static DEVICE_ATTR_RO(name_assign_type);
+
 /* use same locking rules as GIFHWADDR ioctl's */
 static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
@@ -387,6 +406,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
+       &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
index e33937fb32a0ceac413ac369d2991a92c3428235..907fb5e36c02e54794734abe4dc1225bf9af9c7d 100644 (file)
@@ -822,7 +822,8 @@ void __netpoll_cleanup(struct netpoll *np)
 
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
-       }
+       } else
+               RCU_INIT_POINTER(np->dev->npinfo, NULL);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
index fc17a9d309ac028fc61ac7db6e35a05c77513a24..8b849ddfef2e743676fe35642cd49d36188dd74d 100644 (file)
@@ -69,8 +69,9 @@
  * for running devices in the if_list and sends packets until count is 0 it
  * also the thread checks the thread->control which is used for inter-process
  * communication. controlling process "posts" operations to the threads this
- * way. The if_lock should be possible to remove when add/rem_device is merged
- * into this too.
+ * way.
+ * The if_list is RCU protected, and the if_lock remains to protect updating
+ * of if_list, from "add_device" as it invoked from userspace (via proc write).
  *
  * By design there should only be *one* "controlling" process. In practice
  * multiple write accesses gives unpredictable result. Understood by "write"
 #define T_REMDEVALL   (1<<2)   /* Remove all devs */
 #define T_REMDEV      (1<<3)   /* Remove one dev */
 
-/* If lock -- can be removed after some work */
+/* If lock -- protects updating of if_list */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 #define   if_unlock(t)           spin_unlock(&(t->if_lock));
 
@@ -241,6 +242,7 @@ struct pktgen_dev {
        struct proc_dir_entry *entry;   /* proc file */
        struct pktgen_thread *pg_thread;/* the owner */
        struct list_head list;          /* chaining in the thread's run-queue */
+       struct rcu_head  rcu;           /* freed by RCU */
 
        int running;            /* if false, the test will stop */
 
@@ -802,7 +804,6 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen)
                case '\t':
                case ' ':
                        goto done_str;
-                       break;
                default:
                        break;
                }
@@ -1737,14 +1738,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
 
        seq_puts(seq, "Running: ");
 
-       if_lock(t);
-       list_for_each_entry(pkt_dev, &t->if_list, list)
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
        seq_puts(seq, "\nStopped: ");
 
-       list_for_each_entry(pkt_dev, &t->if_list, list)
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
@@ -1753,7 +1754,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        else
                seq_puts(seq, "\nResult: NA\n");
 
-       if_unlock(t);
+       rcu_read_unlock();
 
        return 0;
 }
@@ -1878,10 +1879,8 @@ static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
                pkt_dev = pktgen_find_dev(t, ifname, exact);
                if (pkt_dev) {
                        if (remove) {
-                               if_lock(t);
                                pkt_dev->removal_mark = 1;
                                t->control |= T_REMDEV;
-                               if_unlock(t);
                        }
                        break;
                }
@@ -1931,7 +1930,8 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
        list_for_each_entry(t, &pn->pktgen_threads, th_list) {
                struct pktgen_dev *pkt_dev;
 
-               list_for_each_entry(pkt_dev, &t->if_list, list) {
+               rcu_read_lock();
+               list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                        if (pkt_dev->odev != dev)
                                continue;
 
@@ -1946,6 +1946,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
                                       dev->name);
                        break;
                }
+               rcu_read_unlock();
        }
 }
 
@@ -2997,8 +2998,8 @@ static void pktgen_run(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
 
                /*
                 * setup odev and create initial packet.
@@ -3007,18 +3008,18 @@ static void pktgen_run(struct pktgen_thread *t)
 
                if (pkt_dev->odev) {
                        pktgen_clear_counters(pkt_dev);
-                       pkt_dev->running = 1;   /* Cranke yeself! */
                        pkt_dev->skb = NULL;
                        pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
 
                        set_pkt_overhead(pkt_dev);
 
                        strcpy(pkt_dev->result, "Starting");
+                       pkt_dev->running = 1;   /* Cranke yeself! */
                        started++;
                } else
                        strcpy(pkt_dev->result, "Error starting");
        }
-       if_unlock(t);
+       rcu_read_unlock();
        if (started)
                t->control &= ~(T_STOP);
 }
@@ -3041,27 +3042,25 @@ static int thread_is_running(const struct pktgen_thread *t)
 {
        const struct pktgen_dev *pkt_dev;
 
-       list_for_each_entry(pkt_dev, &t->if_list, list)
-               if (pkt_dev->running)
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
+               if (pkt_dev->running) {
+                       rcu_read_unlock();
                        return 1;
+               }
+       rcu_read_unlock();
        return 0;
 }
 
 static int pktgen_wait_thread_run(struct pktgen_thread *t)
 {
-       if_lock(t);
-
        while (thread_is_running(t)) {
 
-               if_unlock(t);
-
                msleep_interruptible(100);
 
                if (signal_pending(current))
                        goto signal;
-               if_lock(t);
        }
-       if_unlock(t);
        return 1;
 signal:
        return 0;
@@ -3166,10 +3165,10 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
                return -EINVAL;
        }
 
+       pkt_dev->running = 0;
        kfree_skb(pkt_dev->skb);
        pkt_dev->skb = NULL;
        pkt_dev->stopped_at = ktime_get();
-       pkt_dev->running = 0;
 
        show_results(pkt_dev, nr_frags);
 
@@ -3180,9 +3179,8 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
 {
        struct pktgen_dev *pkt_dev, *best = NULL;
 
-       if_lock(t);
-
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                if (!pkt_dev->running)
                        continue;
                if (best == NULL)
@@ -3190,7 +3188,8 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
                else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
                        best = pkt_dev;
        }
-       if_unlock(t);
+       rcu_read_unlock();
+
        return best;
 }
 
@@ -3200,13 +3199,13 @@ static void pktgen_stop(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
+       rcu_read_lock();
 
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                pktgen_stop_device(pkt_dev);
        }
 
-       if_unlock(t);
+       rcu_read_unlock();
 }
 
 /*
@@ -3220,8 +3219,6 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
-
        list_for_each_safe(q, n, &t->if_list) {
                cur = list_entry(q, struct pktgen_dev, list);
 
@@ -3235,8 +3232,6 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
 
                break;
        }
-
-       if_unlock(t);
 }
 
 static void pktgen_rem_all_ifs(struct pktgen_thread *t)
@@ -3248,8 +3243,6 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
 
        /* Remove all devices, free mem */
 
-       if_lock(t);
-
        list_for_each_safe(q, n, &t->if_list) {
                cur = list_entry(q, struct pktgen_dev, list);
 
@@ -3258,8 +3251,6 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
 
                pktgen_remove_device(t, cur);
        }
-
-       if_unlock(t);
 }
 
 static void pktgen_rem_thread(struct pktgen_thread *t)
@@ -3407,10 +3398,10 @@ static int pktgen_thread_worker(void *arg)
 
        pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
 
-       set_current_state(TASK_INTERRUPTIBLE);
-
        set_freezable();
 
+       __set_current_state(TASK_RUNNING);
+
        while (!kthread_should_stop()) {
                pkt_dev = next_to_run(t);
 
@@ -3424,8 +3415,6 @@ static int pktgen_thread_worker(void *arg)
                        continue;
                }
 
-               __set_current_state(TASK_RUNNING);
-
                if (likely(pkt_dev)) {
                        pktgen_xmit(pkt_dev);
 
@@ -3456,9 +3445,8 @@ static int pktgen_thread_worker(void *arg)
                }
 
                try_to_freeze();
-
-               set_current_state(TASK_INTERRUPTIBLE);
        }
+       set_current_state(TASK_INTERRUPTIBLE);
 
        pr_debug("%s stopping all device\n", t->tsk->comm);
        pktgen_stop(t);
@@ -3485,8 +3473,8 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
        struct pktgen_dev *p, *pkt_dev = NULL;
        size_t len = strlen(ifname);
 
-       if_lock(t);
-       list_for_each_entry(p, &t->if_list, list)
+       rcu_read_lock();
+       list_for_each_entry_rcu(p, &t->if_list, list)
                if (strncmp(p->odevname, ifname, len) == 0) {
                        if (p->odevname[len]) {
                                if (exact || p->odevname[len] != '@')
@@ -3496,7 +3484,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
                        break;
                }
 
-       if_unlock(t);
+       rcu_read_unlock();
        pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
        return pkt_dev;
 }
@@ -3510,6 +3498,12 @@ static int add_dev_to_thread(struct pktgen_thread *t,
 {
        int rv = 0;
 
+       /* This function cannot be called concurrently, as its called
+        * under pktgen_thread_lock mutex, but it can run from
+        * userspace on another CPU than the kthread.  The if_lock()
+        * is used here to sync with concurrent instances of
+        * _rem_dev_from_if_list() invoked via kthread, which is also
+        * updating the if_list */
        if_lock(t);
 
        if (pkt_dev->pg_thread) {
@@ -3518,9 +3512,9 @@ static int add_dev_to_thread(struct pktgen_thread *t,
                goto out;
        }
 
-       list_add(&pkt_dev->list, &t->if_list);
-       pkt_dev->pg_thread = t;
        pkt_dev->running = 0;
+       pkt_dev->pg_thread = t;
+       list_add_rcu(&pkt_dev->list, &t->if_list);
 
 out:
        if_unlock(t);
@@ -3675,11 +3669,13 @@ static void _rem_dev_from_if_list(struct pktgen_thread *t,
        struct list_head *q, *n;
        struct pktgen_dev *p;
 
+       if_lock(t);
        list_for_each_safe(q, n, &t->if_list) {
                p = list_entry(q, struct pktgen_dev, list);
                if (p == pkt_dev)
-                       list_del(&p->list);
+                       list_del_rcu(&p->list);
        }
+       if_unlock(t);
 }
 
 static int pktgen_remove_device(struct pktgen_thread *t,
@@ -3699,20 +3695,22 @@ static int pktgen_remove_device(struct pktgen_thread *t,
                pkt_dev->odev = NULL;
        }
 
-       /* And update the thread if_list */
-
-       _rem_dev_from_if_list(t, pkt_dev);
-
+       /* Remove proc before if_list entry, because add_device uses
+        * list to determine if interface already exist, avoid race
+        * with proc_create_data() */
        if (pkt_dev->entry)
                proc_remove(pkt_dev->entry);
 
+       /* And update the thread if_list */
+       _rem_dev_from_if_list(t, pkt_dev);
+
 #ifdef CONFIG_XFRM
        free_SAs(pkt_dev);
 #endif
        vfree(pkt_dev->flows);
        if (pkt_dev->page)
                put_page(pkt_dev->page);
-       kfree(pkt_dev);
+       kfree_rcu(pkt_dev, rcu);
        return 0;
 }
 
@@ -3812,6 +3810,7 @@ static void __exit pg_cleanup(void)
 {
        unregister_netdevice_notifier(&pktgen_notifier_block);
        unregister_pernet_subsys(&pg_net_ops);
+       /* Don't need rcu_barrier() due to use of kfree_rcu() */
 }
 
 module_init(pg_init);
index d3027a73fd4bbc152f13011cb09335af365f19dc..12ab7b4be60932700901f4f3fb9d159f3ddb7ebd 100644 (file)
  * test_8021q:
  *   jneq #0x8100, test_ieee1588   ; ETH_P_8021Q ?
  *   ldh [16]                      ; load inner type
- *   jneq #0x88f7, drop_ieee1588   ; ETH_P_1588 ?
+ *   jneq #0x88f7, test_8021q_ipv4 ; ETH_P_1588 ?
  *   ldb [18]                      ; load payload
  *   and #0x8                      ; as we don't have ports here, test
  *   jneq #0x0, drop_ieee1588      ; for PTP_GEN_BIT and drop these
  *   ldh [18]                      ; reload payload
  *   and #0xf                      ; mask PTP_CLASS_VMASK
- *   or #0x40                      ; PTP_CLASS_V2_VLAN
+ *   or #0x70                      ; PTP_CLASS_VLAN|PTP_CLASS_L2
+ *   ret a                         ; return PTP class
+ *
+ * ; PTP over UDP over IPv4 over 802.1Q over Ethernet
+ * test_8021q_ipv4:
+ *   jneq #0x800, test_8021q_ipv6  ; ETH_P_IP ?
+ *   ldb [27]                      ; load proto
+ *   jneq #17, drop_8021q_ipv4     ; IPPROTO_UDP ?
+ *   ldh [24]                      ; load frag offset field
+ *   jset #0x1fff, drop_8021q_ipv4; don't allow fragments
+ *   ldxb 4*([18]&0xf)             ; load IP header len
+ *   ldh [x + 20]                  ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv4    ; is port PTP_EV_PORT ?
+ *   ldh [x + 26]                  ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x50                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV4
+ *   ret a                         ; return PTP class
+ *   drop_8021q_ipv4: ret #0x0     ; PTP_CLASS_NONE
+ *
+ * ; PTP over UDP over IPv6 over 802.1Q over Ethernet
+ * test_8021q_ipv6:
+ *   jneq #0x86dd, drop_8021q_ipv6 ; ETH_P_IPV6 ?
+ *   ldb [24]                      ; load proto
+ *   jneq #17, drop_8021q_ipv6           ; IPPROTO_UDP ?
+ *   ldh [60]                      ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv6          ; is port PTP_EV_PORT ?
+ *   ldh [66]                      ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x60                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV6
  *   ret a                         ; return PTP class
+ *   drop_8021q_ipv6: ret #0x0     ; PTP_CLASS_NONE
  *
  * ; PTP over Ethernet
  * test_ieee1588:
@@ -113,16 +142,39 @@ void __init ptp_classifier_init(void)
                { 0x44,  0,  0, 0x00000020 },
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
-               { 0x15,  0,  9, 0x00008100 },
+               { 0x15,  0, 32, 0x00008100 },
                { 0x28,  0,  0, 0x00000010 },
-               { 0x15,  0, 15, 0x000088f7 },
+               { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x00000008 },
-               { 0x15,  0, 12, 0x00000000 },
+               { 0x15,  0, 35, 0x00000000 },
                { 0x28,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x0000000f },
-               { 0x44,  0,  0, 0x00000040 },
+               { 0x44,  0,  0, 0x00000070 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x15,  0, 12, 0x00000800 },
+               { 0x30,  0,  0, 0x0000001b },
+               { 0x15,  0,  9, 0x00000011 },
+               { 0x28,  0,  0, 0x00000018 },
+               { 0x45,  7,  0, 0x00001fff },
+               { 0xb1,  0,  0, 0x00000012 },
+               { 0x48,  0,  0, 0x00000014 },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x48,  0,  0, 0x0000001a },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000050 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
+               { 0x15,  0,  8, 0x000086dd },
+               { 0x30,  0,  0, 0x00000018 },
+               { 0x15,  0,  6, 0x00000011 },
+               { 0x28,  0,  0, 0x0000003c },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x28,  0,  0, 0x00000042 },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000060 },
                { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
                { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x0000000e },
                { 0x54,  0,  0, 0x00000008 },
index 467f326126e0eb913416c9bd2f847816d6d98948..04db318e6218d93100bd07c12bebc38e2bc1f138 100644 (file)
@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                      unsigned int nr_table_entries)
 {
        size_t lopt_size = sizeof(struct listen_sock);
-       struct listen_sock *lopt;
+       struct listen_sock *lopt = NULL;
 
        nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
        nr_table_entries = max_t(u32, nr_table_entries, 8);
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
-       if (lopt_size > PAGE_SIZE)
+
+       if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               lopt = kzalloc(lopt_size, GFP_KERNEL |
+                                         __GFP_NOWARN |
+                                         __GFP_NORETRY);
+       if (!lopt)
                lopt = vzalloc(lopt_size);
-       else
-               lopt = kzalloc(lopt_size, GFP_KERNEL);
-       if (lopt == NULL)
+       if (!lopt)
                return -ENOMEM;
 
-       for (lopt->max_qlen_log = 3;
-            (1 << lopt->max_qlen_log) < nr_table_entries;
-            lopt->max_qlen_log++);
-
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
        rwlock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;
+       lopt->max_qlen_log = ilog2(nr_table_entries);
 
        write_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
-       struct listen_sock *lopt;
-       size_t lopt_size;
-
-       /*
-        * this is an error recovery path only
-        * no locking needed and the lopt is not NULL
-        */
-
-       lopt = queue->listen_opt;
-       lopt_size = sizeof(struct listen_sock) +
-               lopt->nr_table_entries * sizeof(struct request_sock *);
-
-       if (lopt_size > PAGE_SIZE)
-               vfree(lopt);
-       else
-               kfree(lopt);
+       /* This is an error recovery path only, no locking needed */
+       kvfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 {
        /* make all the listen_opt local to us */
        struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-       size_t lopt_size = sizeof(struct listen_sock) +
-               lopt->nr_table_entries * sizeof(struct request_sock *);
 
        if (lopt->qlen != 0) {
                unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
        }
 
        WARN_ON(lopt->qlen != 0);
-       if (lopt_size > PAGE_SIZE)
-               vfree(lopt);
-       else
-               kfree(lopt);
+       kvfree(lopt);
 }
 
 /*
index 1063996f8317fb95fea964e095ae2e372ceb74be..8d39071f32d76a41d10c5b48e06f4cf59dca1ee5 100644 (file)
@@ -299,7 +299,12 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
        if (rtnl_link_ops_get(ops->kind))
                return -EEXIST;
 
-       if (!ops->dellink)
+       /* The check for setup is here because if ops
+        * does not have that filled up, it is not possible
+        * to use the ops for creating device. So do not
+        * fill up dellink as well. That disables rtnl_dellink.
+        */
+       if (ops->setup && !ops->dellink)
                ops->dellink = unregister_netdevice_queue;
 
        list_add_tail(&ops->list, &link_ops);
@@ -1777,7 +1782,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                return -ENODEV;
 
        ops = dev->rtnl_link_ops;
-       if (!ops)
+       if (!ops || !ops->dellink)
                return -EOPNOTSUPP;
 
        ops->dellink(dev, &list_kill);
@@ -1805,7 +1810,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
 EXPORT_SYMBOL(rtnl_configure_link);
 
 struct net_device *rtnl_create_link(struct net *net,
-       char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
+       char *ifname, unsigned char name_assign_type,
+       const struct rtnl_link_ops *ops, struct nlattr *tb[])
 {
        int err;
        struct net_device *dev;
@@ -1823,8 +1829,8 @@ struct net_device *rtnl_create_link(struct net *net,
                num_rx_queues = ops->get_num_rx_queues();
 
        err = -ENOMEM;
-       dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
-                              num_tx_queues, num_rx_queues);
+       dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
+                              ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
                goto err;
 
@@ -1889,6 +1895,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        char ifname[IFNAMSIZ];
        struct nlattr *tb[IFLA_MAX+1];
        struct nlattr *linkinfo[IFLA_INFO_MAX+1];
+       unsigned char name_assign_type = NET_NAME_USER;
        int err;
 
 #ifdef CONFIG_MODULES
@@ -2038,14 +2045,19 @@ replay:
                        return -EOPNOTSUPP;
                }
 
-               if (!ifname[0])
+               if (!ops->setup)
+                       return -EOPNOTSUPP;
+
+               if (!ifname[0]) {
                        snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+                       name_assign_type = NET_NAME_ENUM;
+               }
 
                dest_net = rtnl_link_get_net(net, tb);
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
-               dev = rtnl_create_link(dest_net, ifname, ops, tb);
+               dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
                if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
                        goto out;
@@ -2380,22 +2392,20 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
                     struct net_device *dev,
                     const unsigned char *addr)
 {
-       int err = -EOPNOTSUPP;
+       int err = -EINVAL;
 
        /* If aging addresses are supported device will need to
         * implement its own handler for this.
         */
        if (!(ndm->ndm_state & NUD_PERMANENT)) {
                pr_info("%s: FDB only supports static addresses\n", dev->name);
-               return -EINVAL;
+               return err;
        }
 
        if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
                err = dev_uc_del(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_del(dev, addr);
-       else
-               err = -EINVAL;
 
        return err;
 }
@@ -2509,6 +2519,7 @@ skip:
 int ndo_dflt_fdb_dump(struct sk_buff *skb,
                      struct netlink_callback *cb,
                      struct net_device *dev,
+                     struct net_device *filter_dev,
                      int idx)
 {
        int err;
@@ -2526,28 +2537,72 @@ EXPORT_SYMBOL(ndo_dflt_fdb_dump);
 
 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       int idx = 0;
-       struct net *net = sock_net(skb->sk);
        struct net_device *dev;
+       struct nlattr *tb[IFLA_MAX+1];
+       struct net_device *bdev = NULL;
+       struct net_device *br_dev = NULL;
+       const struct net_device_ops *ops = NULL;
+       const struct net_device_ops *cops = NULL;
+       struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
+       struct net *net = sock_net(skb->sk);
+       int brport_idx = 0;
+       int br_idx = 0;
+       int idx = 0;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
-               if (dev->priv_flags & IFF_BRIDGE_PORT) {
-                       struct net_device *br_dev;
-                       const struct net_device_ops *ops;
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+                       ifla_policy) == 0) {
+               if (tb[IFLA_MASTER])
+                       br_idx = nla_get_u32(tb[IFLA_MASTER]);
+       }
+
+       brport_idx = ifm->ifi_index;
 
-                       br_dev = netdev_master_upper_dev_get(dev);
-                       ops = br_dev->netdev_ops;
-                       if (ops->ndo_fdb_dump)
-                               idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
+       if (br_idx) {
+               br_dev = __dev_get_by_index(net, br_idx);
+               if (!br_dev)
+                       return -ENODEV;
+
+               ops = br_dev->netdev_ops;
+               bdev = br_dev;
+       }
+
+       for_each_netdev(net, dev) {
+               if (brport_idx && (dev->ifindex != brport_idx))
+                       continue;
+
+               if (!br_idx) { /* user did not specify a specific bridge */
+                       if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                               br_dev = netdev_master_upper_dev_get(dev);
+                               cops = br_dev->netdev_ops;
+                       }
+
+                       bdev = dev;
+               } else {
+                       if (dev != br_dev &&
+                           !(dev->priv_flags & IFF_BRIDGE_PORT))
+                               continue;
+
+                       if (br_dev != netdev_master_upper_dev_get(dev) &&
+                           !(dev->priv_flags & IFF_EBRIDGE))
+                               continue;
+
+                       bdev = br_dev;
+                       cops = ops;
+               }
+
+               if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                       if (cops && cops->ndo_fdb_dump)
+                               idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
+                                                        idx);
                }
 
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
                if (dev->netdev_ops->ndo_fdb_dump)
-                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
-               else
-                       idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, bdev, dev,
+                                                           idx);
+
+               cops = NULL;
        }
-       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
index 6521dfd8b7c8563aa5cc7ccea8e5d6da05a7a586..a8770391ea5bfc0db85135cf913c84d09f55cedb 100644 (file)
@@ -43,31 +43,22 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
                return;
 
        type = classify(skb);
+       if (type == PTP_CLASS_NONE)
+               return;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->txtstamp)) {
+               if (!atomic_inc_not_zero(&sk->sk_refcnt))
+                       return;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->txtstamp)) {
-                       if (!atomic_inc_not_zero(&sk->sk_refcnt))
-                               return;
-
-                       clone = skb_clone(skb, GFP_ATOMIC);
-                       if (!clone) {
-                               sock_put(sk);
-                               return;
-                       }
-
-                       clone->sk = sk;
-                       phydev->drv->txtstamp(phydev, clone, type);
+               clone = skb_clone(skb, GFP_ATOMIC);
+               if (!clone) {
+                       sock_put(sk);
+                       return;
                }
-               break;
-       default:
-               break;
+
+               clone->sk = sk;
+               phydev->drv->txtstamp(phydev, clone, type);
        }
 }
 EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
@@ -114,20 +105,12 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 
        __skb_pull(skb, ETH_HLEN);
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->rxtstamp))
-                       return phydev->drv->rxtstamp(phydev, skb, type);
-               break;
-       default:
-               break;
-       }
+       if (type == PTP_CLASS_NONE)
+               return false;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->rxtstamp))
+               return phydev->drv->rxtstamp(phydev, skb, type);
 
        return false;
 }
index f8b98d89c28527f049b4c3132aa7f0b412cfa0ef..c34af7a1d2d47f3f79fb604a9dedc21e8aee97ef 100644 (file)
@@ -471,7 +471,11 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
        id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
 
        if (netdev->dcbnl_ops->getapp) {
-               up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+               ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+               if (ret < 0)
+                       return ret;
+               else
+                       up = ret;
        } else {
                struct dcb_app app = {
                                        .selector = idtype,
@@ -538,6 +542,8 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
 
        if (netdev->dcbnl_ops->setapp) {
                ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+               if (ret < 0)
+                       return ret;
        } else {
                struct dcb_app app;
                app.selector = idtype;
index 4db3c2a1679cf24d668f0970d448af37aef8acda..04cb17d4b0cea9e9a0eac62f7e7f7135d7b9337c 100644 (file)
@@ -386,7 +386,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
-       req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
+       req = inet_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;
 
index c69eb9c4fbb832fd641d5dc80b0520cb55a99438..b50dc436db1fb4639b340bacffe0ae1fc027f8fa 100644 (file)
@@ -55,11 +55,9 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
-                       const struct ipv6_pinfo *np = inet6_sk(sk);
-
                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-                       tw->tw_ipv6only = np->ipv6only;
+                       tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
                /* Linkage updates. */
index 5db37cef50a9ccd80c642118f54dd4ecf04219b8..0a49632fac478f1ac17b3f0159cbdf748fe25110 100644 (file)
@@ -351,8 +351,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
        for (i = 0; i < pd->nr_chips; i++) {
                port_index = 0;
                while (port_index < DSA_MAX_PORTS) {
-                       if (pd->chip[i].port_names[port_index])
-                               kfree(pd->chip[i].port_names[port_index]);
+                       kfree(pd->chip[i].port_names[port_index]);
                        port_index++;
                }
                kfree(pd->chip[i].rtable);
index 64c5af0a10dd82169ccada3d82baa866a5b82cc8..45a1e34c89e0d975dd9f361a73a5617d69a10301 100644 (file)
@@ -340,8 +340,8 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        struct dsa_slave_priv *p;
        int ret;
 
-       slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv),
-                                name, ether_setup);
+       slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
+                                NET_NAME_UNKNOWN, ether_setup);
        if (slave_dev == NULL)
                return slave_dev;
 
index 5dc638cad2e1ff396347f53c74a0a038d3a4b77e..f405e05924078b2d30f45139cd698843a16256da 100644 (file)
@@ -390,7 +390,8 @@ EXPORT_SYMBOL(ether_setup);
 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                      unsigned int rxqs)
 {
-       return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
+       return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
+                               ether_setup, txqs, rxqs);
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
 
index b68359f181cca9747c73d123d9dfd8b72f8e8c4f..9ae972a820f4683efb16eb8fd2c5d69e75c12049 100644 (file)
@@ -4,4 +4,5 @@
 
 obj-$(CONFIG_HSR)      += hsr.o
 
-hsr-y                  := hsr_main.o hsr_framereg.o hsr_device.o hsr_netlink.o
+hsr-y                  := hsr_main.o hsr_framereg.o hsr_device.o \
+                          hsr_netlink.o hsr_slave.o hsr_forward.o
index e5302b7f7ca9f0f6f67954689572d6a5b09a3c74..a138d75751df2fb46219168c01fd1bf5cce24d43 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * This file contains device methods for creating, using and destroying
  * virtual HSR devices.
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
-#include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 #include "hsr_device.h"
+#include "hsr_slave.h"
 #include "hsr_framereg.h"
 #include "hsr_main.h"
+#include "hsr_forward.h"
 
 
 static bool is_admin_up(struct net_device *dev)
@@ -45,75 +46,108 @@ static void __hsr_set_operstate(struct net_device *dev, int transition)
        }
 }
 
-void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
-                      struct net_device *slave2)
+static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
 {
-       if (!is_admin_up(hsr_dev)) {
-               __hsr_set_operstate(hsr_dev, IF_OPER_DOWN);
+       if (!is_admin_up(master->dev)) {
+               __hsr_set_operstate(master->dev, IF_OPER_DOWN);
                return;
        }
 
-       if (is_slave_up(slave1) || is_slave_up(slave2))
-               __hsr_set_operstate(hsr_dev, IF_OPER_UP);
+       if (has_carrier)
+               __hsr_set_operstate(master->dev, IF_OPER_UP);
        else
-               __hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN);
+               __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
 }
 
-void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
-                    struct net_device *slave2)
+static bool hsr_check_carrier(struct hsr_port *master)
 {
-       if (is_slave_up(slave1) || is_slave_up(slave2))
-               netif_carrier_on(hsr_dev);
+       struct hsr_port *port;
+       bool has_carrier;
+
+       has_carrier = false;
+
+       rcu_read_lock();
+       hsr_for_each_port(master->hsr, port)
+               if ((port->type != HSR_PT_MASTER) && is_slave_up(port->dev)) {
+                       has_carrier = true;
+                       break;
+               }
+       rcu_read_unlock();
+
+       if (has_carrier)
+               netif_carrier_on(master->dev);
        else
-               netif_carrier_off(hsr_dev);
+               netif_carrier_off(master->dev);
+
+       return has_carrier;
 }
 
 
-void hsr_check_announce(struct net_device *hsr_dev, int old_operstate)
+static void hsr_check_announce(struct net_device *hsr_dev,
+                              unsigned char old_operstate)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
 
        if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
                /* Went up */
-               hsr_priv->announce_count = 0;
-               hsr_priv->announce_timer.expires = jiffies +
+               hsr->announce_count = 0;
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-               add_timer(&hsr_priv->announce_timer);
+               add_timer(&hsr->announce_timer);
        }
 
        if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
                /* Went down */
-               del_timer(&hsr_priv->announce_timer);
+               del_timer(&hsr->announce_timer);
 }
 
-
-int hsr_get_max_mtu(struct hsr_priv *hsr_priv)
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
 {
-       int mtu_max;
-
-       if (hsr_priv->slave[0] && hsr_priv->slave[1])
-               mtu_max = min(hsr_priv->slave[0]->mtu, hsr_priv->slave[1]->mtu);
-       else if (hsr_priv->slave[0])
-               mtu_max = hsr_priv->slave[0]->mtu;
-       else if (hsr_priv->slave[1])
-               mtu_max = hsr_priv->slave[1]->mtu;
-       else
-               mtu_max = HSR_TAGLEN;
+       struct hsr_port *master;
+       unsigned char old_operstate;
+       bool has_carrier;
 
-       return mtu_max - HSR_TAGLEN;
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       /* netif_stacked_transfer_operstate() cannot be used here since
+        * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+        */
+       old_operstate = master->dev->operstate;
+       has_carrier = hsr_check_carrier(master);
+       hsr_set_operstate(master, has_carrier);
+       hsr_check_announce(master->dev, old_operstate);
 }
 
+int hsr_get_max_mtu(struct hsr_priv *hsr)
+{
+       unsigned int mtu_max;
+       struct hsr_port *port;
+
+       mtu_max = ETH_DATA_LEN;
+       rcu_read_lock();
+       hsr_for_each_port(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       mtu_max = min(port->dev->mtu, mtu_max);
+       rcu_read_unlock();
+
+       if (mtu_max < HSR_HLEN)
+               return 0;
+       return mtu_max - HSR_HLEN;
+}
+
+
 static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 
-       if (new_mtu > hsr_get_max_mtu(hsr_priv)) {
-               netdev_info(hsr_priv->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
-                           HSR_TAGLEN);
+       if (new_mtu > hsr_get_max_mtu(hsr)) {
+               netdev_info(master->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
+                           HSR_HLEN);
                return -EINVAL;
        }
 
@@ -124,164 +158,95 @@ static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
 
 static int hsr_dev_open(struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
-       int i;
-       char *slave_name;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
+       char designation;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
+       designation = '\0';
 
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (hsr_priv->slave[i])
-                       slave_name = hsr_priv->slave[i]->name;
-               else
-                       slave_name = "null";
-
-               if (!is_slave_up(hsr_priv->slave[i]))
-                       netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n",
-                                   'A' + i, slave_name);
+       rcu_read_lock();
+       hsr_for_each_port(hsr, port) {
+               if (port->type == HSR_PT_MASTER)
+                       continue;
+               switch (port->type) {
+               case HSR_PT_SLAVE_A:
+                       designation = 'A';
+                       break;
+               case HSR_PT_SLAVE_B:
+                       designation = 'B';
+                       break;
+               default:
+                       designation = '?';
+               }
+               if (!is_slave_up(port->dev))
+                       netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+                                   designation, port->dev->name);
        }
+       rcu_read_unlock();
+
+       if (designation == '\0')
+               netdev_warn(dev, "No slave devices configured\n");
 
        return 0;
 }
 
+
 static int hsr_dev_close(struct net_device *dev)
 {
-       /* Nothing to do here. We could try to restore the state of the slaves
-        * to what they were before being changed by the hsr master dev's state,
-        * but they might have been changed manually in the mean time too, so
-        * taking them up or down here might be confusing and is probably not a
-        * good idea.
-        */
+       /* Nothing to do here. */
        return 0;
 }
 
 
-static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr_priv)
+static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+                                               netdev_features_t features)
 {
-       unsigned long irqflags;
+       netdev_features_t mask;
+       struct hsr_port *port;
 
-       /* IEC 62439-1:2010, p 48, says the 4-bit "path" field can take values
-        * between 0001-1001 ("ring identifier", for regular HSR frames),
-        * or 1111 ("HSR management", supervision frames). Unfortunately, the
-        * spec writers forgot to explain what a "ring identifier" is, or
-        * how it is used. So we just set this to 0001 for regular frames,
-        * and 1111 for supervision frames.
-        */
-       set_hsr_tag_path(&hsr_ethhdr->hsr_tag, 0x1);
+       mask = features;
 
-       /* IEC 62439-1:2010, p 12: "The link service data unit in an Ethernet
-        * frame is the content of the frame located between the Length/Type
-        * field and the Frame Check Sequence."
+       /* Mask out all features that, if supported by one device, should be
+        * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
         *
-        * IEC 62439-3, p 48, specifies the "original LPDU" to include the
-        * original "LT" field (what "LT" means is not explained anywhere as
-        * far as I can see - perhaps "Length/Type"?). So LSDU_size might
-        * equal original length + 2.
-        *   Also, the fact that this field is not used anywhere (might be used
-        * by a RedBox connecting HSR and PRP nets?) means I cannot test its
-        * correctness. Instead of guessing, I set this to 0 here, to make any
-        * problems immediately apparent. Anyone using this driver with PRP/HSR
-        * RedBoxes might need to fix this...
+        * Anything that's off in mask will not be enabled - so only things
+        * that were in features originally, and also is in NETIF_F_ONE_FOR_ALL,
+        * may become enabled.
         */
-       set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, 0);
-
-       spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
-       hsr_ethhdr->hsr_tag.sequence_nr = htons(hsr_priv->sequence_nr);
-       hsr_priv->sequence_nr++;
-       spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+       features &= ~NETIF_F_ONE_FOR_ALL;
+       hsr_for_each_port(hsr, port)
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
 
-       hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
-
-       hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+       return features;
 }
 
-static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
-                     enum hsr_dev_idx dev_idx)
+static netdev_features_t hsr_fix_features(struct net_device *dev,
+                                         netdev_features_t features)
 {
-       struct hsr_ethhdr *hsr_ethhdr;
-
-       hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+       struct hsr_priv *hsr = netdev_priv(dev);
 
-       skb->dev = hsr_priv->slave[dev_idx];
-
-       hsr_addr_subst_dest(hsr_priv, &hsr_ethhdr->ethhdr, dev_idx);
-
-       /* Address substitution (IEC62439-3 pp 26, 50): replace mac
-        * address of outgoing frame with that of the outgoing slave's.
-        */
-       ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);
-
-       return dev_queue_xmit(skb);
+       return hsr_features_recompute(hsr, features);
 }
 
 
 static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
-       struct hsr_ethhdr *hsr_ethhdr;
-       struct sk_buff *skb2;
-       int res1, res2;
-
-       hsr_priv = netdev_priv(dev);
-       hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
-
-       if ((skb->protocol != htons(ETH_P_PRP)) ||
-           (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
-               hsr_fill_tag(hsr_ethhdr, hsr_priv);
-               skb->protocol = htons(ETH_P_PRP);
-       }
-
-       skb2 = pskb_copy(skb, GFP_ATOMIC);
-
-       res1 = NET_XMIT_DROP;
-       if (likely(hsr_priv->slave[HSR_DEV_SLAVE_A]))
-               res1 = slave_xmit(skb, hsr_priv, HSR_DEV_SLAVE_A);
+       struct hsr_priv *hsr = netdev_priv(dev);
+       struct hsr_port *master;
 
-       res2 = NET_XMIT_DROP;
-       if (likely(skb2 && hsr_priv->slave[HSR_DEV_SLAVE_B]))
-               res2 = slave_xmit(skb2, hsr_priv, HSR_DEV_SLAVE_B);
-
-       if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
-                  res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
-               hsr_priv->dev->stats.tx_packets++;
-               hsr_priv->dev->stats.tx_bytes += skb->len;
-       } else {
-               hsr_priv->dev->stats.tx_dropped++;
-       }
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       skb->dev = master->dev;
+       hsr_forward_skb(skb, master);
 
        return NETDEV_TX_OK;
 }
 
 
-static int hsr_header_create(struct sk_buff *skb, struct net_device *dev,
-                            unsigned short type, const void *daddr,
-                            const void *saddr, unsigned int len)
-{
-       int res;
-
-       /* Make room for the HSR tag now. We will fill it in later (in
-        * hsr_dev_xmit)
-        */
-       if (skb_headroom(skb) < HSR_TAGLEN + ETH_HLEN)
-               return -ENOBUFS;
-       skb_push(skb, HSR_TAGLEN);
-
-       /* To allow VLAN/HSR combos we should probably use
-        * res = dev_hard_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
-        * here instead. It would require other changes too, though - e.g.
-        * separate headers for each slave etc...
-        */
-       res = eth_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
-       if (res <= 0)
-               return res;
-       skb_reset_mac_header(skb);
-
-       return res + HSR_TAGLEN;
-}
-
-
 static const struct header_ops hsr_header_ops = {
-       .create  = hsr_header_create,
+       .create  = eth_header,
        .parse   = eth_header_parse,
 };
 
@@ -291,67 +256,63 @@ static const struct header_ops hsr_header_ops = {
  */
 static int hsr_pad(int size)
 {
-       const int min_size = ETH_ZLEN - HSR_TAGLEN - ETH_HLEN;
+       const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN;
 
        if (size >= min_size)
                return size;
        return min_size;
 }
 
-static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
+static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
 {
-       struct hsr_priv *hsr_priv;
        struct sk_buff *skb;
        int hlen, tlen;
        struct hsr_sup_tag *hsr_stag;
        struct hsr_sup_payload *hsr_sp;
        unsigned long irqflags;
 
-       hlen = LL_RESERVED_SPACE(hsr_dev);
-       tlen = hsr_dev->needed_tailroom;
+       hlen = LL_RESERVED_SPACE(master->dev);
+       tlen = master->dev->needed_tailroom;
        skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
                        GFP_ATOMIC);
 
        if (skb == NULL)
                return;
 
-       hsr_priv = netdev_priv(hsr_dev);
-
        skb_reserve(skb, hlen);
 
-       skb->dev = hsr_dev;
+       skb->dev = master->dev;
        skb->protocol = htons(ETH_P_PRP);
        skb->priority = TC_PRIO_CONTROL;
 
        if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
-                           hsr_priv->sup_multicast_addr,
-                           skb->dev->dev_addr, skb->len) < 0)
+                           master->hsr->sup_multicast_addr,
+                           skb->dev->dev_addr, skb->len) <= 0)
                goto out;
+       skb_reset_mac_header(skb);
 
-       skb_pull(skb, sizeof(struct ethhdr));
-       hsr_stag = (typeof(hsr_stag)) skb->data;
+       hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(*hsr_stag));
 
        set_hsr_stag_path(hsr_stag, 0xf);
        set_hsr_stag_HSR_Ver(hsr_stag, 0);
 
-       spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
-       hsr_stag->sequence_nr = htons(hsr_priv->sequence_nr);
-       hsr_priv->sequence_nr++;
-       spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+       spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
+       hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
+       master->hsr->sequence_nr++;
+       spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
 
        hsr_stag->HSR_TLV_Type = type;
        hsr_stag->HSR_TLV_Length = 12;
 
-       skb_push(skb, sizeof(struct ethhdr));
-
        /* Payload: MacAddressA */
        hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
-       ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);
+       ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
-       dev_queue_xmit(skb);
+       hsr_forward_skb(skb, master);
        return;
 
 out:
+       WARN_ON_ONCE("HSR: Could not send supervision frame\n");
        kfree_skb(skb);
 }
 
@@ -360,59 +321,32 @@ out:
  */
 static void hsr_announce(unsigned long data)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
 
-       hsr_priv = (struct hsr_priv *) data;
+       hsr = (struct hsr_priv *) data;
 
-       if (hsr_priv->announce_count < 3) {
-               send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_ANNOUNCE);
-               hsr_priv->announce_count++;
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+
+       if (hsr->announce_count < 3) {
+               send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE);
+               hsr->announce_count++;
        } else {
-               send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_LIFE_CHECK);
+               send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK);
        }
 
-       if (hsr_priv->announce_count < 3)
-               hsr_priv->announce_timer.expires = jiffies +
+       if (hsr->announce_count < 3)
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
        else
-               hsr_priv->announce_timer.expires = jiffies +
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 
-       if (is_admin_up(hsr_priv->dev))
-               add_timer(&hsr_priv->announce_timer);
-}
-
-
-static void restore_slaves(struct net_device *hsr_dev)
-{
-       struct hsr_priv *hsr_priv;
-       int i;
-       int res;
-
-       hsr_priv = netdev_priv(hsr_dev);
-
-       rtnl_lock();
-
-       /* Restore promiscuity */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (!hsr_priv->slave[i])
-                       continue;
-               res = dev_set_promiscuity(hsr_priv->slave[i], -1);
-               if (res)
-                       netdev_info(hsr_dev,
-                                   "Cannot restore slave promiscuity (%s, %d)\n",
-                                   hsr_priv->slave[i]->name, res);
-       }
-
-       rtnl_unlock();
-}
-
-static void reclaim_hsr_dev(struct rcu_head *rh)
-{
-       struct hsr_priv *hsr_priv;
+       if (is_admin_up(master->dev))
+               add_timer(&hsr->announce_timer);
 
-       hsr_priv = container_of(rh, struct hsr_priv, rcu_head);
-       free_netdev(hsr_priv->dev);
+       rcu_read_unlock();
 }
 
 
@@ -421,14 +355,18 @@ static void reclaim_hsr_dev(struct rcu_head *rh)
  */
 static void hsr_dev_destroy(struct net_device *hsr_dev)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
+       hsr_for_each_port(hsr, port)
+               hsr_del_port(port);
 
-       del_timer(&hsr_priv->announce_timer);
-       unregister_hsr_master(hsr_priv);    /* calls list_del_rcu on hsr_priv */
-       restore_slaves(hsr_dev);
-       call_rcu(&hsr_priv->rcu_head, reclaim_hsr_dev);   /* reclaim hsr_priv */
+       del_timer_sync(&hsr->prune_timer);
+       del_timer_sync(&hsr->announce_timer);
+
+       synchronize_rcu();
+       free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -436,62 +374,51 @@ static const struct net_device_ops hsr_device_ops = {
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
+       .ndo_fix_features = hsr_fix_features,
 };
 
+static struct device_type hsr_type = {
+       .name = "hsr",
+};
 
 void hsr_dev_setup(struct net_device *dev)
 {
        random_ether_addr(dev->dev_addr);
 
        ether_setup(dev);
-       dev->header_ops          = &hsr_header_ops;
-       dev->netdev_ops          = &hsr_device_ops;
-       dev->tx_queue_len        = 0;
+       dev->header_ops = &hsr_header_ops;
+       dev->netdev_ops = &hsr_device_ops;
+       SET_NETDEV_DEVTYPE(dev, &hsr_type);
+       dev->tx_queue_len = 0;
 
        dev->destructor = hsr_dev_destroy;
+
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+                          NETIF_F_HW_VLAN_CTAG_TX;
+
+       dev->features = dev->hw_features;
+
+       /* Prevent recursive tx locking */
+       dev->features |= NETIF_F_LLTX;
+       /* VLAN on top of HSR needs testing and probably some work on
+        * hsr_header_create() etc.
+        */
+       dev->features |= NETIF_F_VLAN_CHALLENGED;
+       /* Not sure about this. Taken from bridge code. netdev_features.h says
+        * it means "Does not change network namespaces".
+        */
+       dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
 
 /* Return true if dev is a HSR master; return false otherwise.
  */
-bool is_hsr_master(struct net_device *dev)
+inline bool is_hsr_master(struct net_device *dev)
 {
        return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
 }
 
-static int check_slave_ok(struct net_device *dev)
-{
-       /* Don't allow HSR on non-ethernet like devices */
-       if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
-           (dev->addr_len != ETH_ALEN)) {
-               netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
-               return -EINVAL;
-       }
-
-       /* Don't allow enslaving hsr devices */
-       if (is_hsr_master(dev)) {
-               netdev_info(dev, "Cannot create trees of HSR devices.\n");
-               return -EINVAL;
-       }
-
-       if (is_hsr_slave(dev)) {
-               netdev_info(dev, "This device is already a HSR slave.\n");
-               return -EINVAL;
-       }
-
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
-               netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
-               return -EINVAL;
-       }
-
-       /* HSR over bonded devices has not been tested, but I'm not sure it
-        * won't work...
-        */
-
-       return 0;
-}
-
-
 /* Default multicast address for HSR Supervision frames */
 static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
        0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
@@ -500,97 +427,74 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec)
 {
-       struct hsr_priv *hsr_priv;
-       int i;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
        int res;
 
-       hsr_priv = netdev_priv(hsr_dev);
-       hsr_priv->dev = hsr_dev;
-       INIT_LIST_HEAD(&hsr_priv->node_db);
-       INIT_LIST_HEAD(&hsr_priv->self_node_db);
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
-               hsr_priv->slave[i] = slave[i];
-
-       spin_lock_init(&hsr_priv->seqnr_lock);
-       /* Overflow soon to find bugs easier: */
-       hsr_priv->sequence_nr = USHRT_MAX - 1024;
-
-       init_timer(&hsr_priv->announce_timer);
-       hsr_priv->announce_timer.function = hsr_announce;
-       hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
+       hsr = netdev_priv(hsr_dev);
+       INIT_LIST_HEAD(&hsr->ports);
+       INIT_LIST_HEAD(&hsr->node_db);
+       INIT_LIST_HEAD(&hsr->self_node_db);
 
-       ether_addr_copy(hsr_priv->sup_multicast_addr, def_multicast_addr);
-       hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
+       ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
 
-/* FIXME: should I modify the value of these?
- *
- * - hsr_dev->flags - i.e.
- *                     IFF_MASTER/SLAVE?
- * - hsr_dev->priv_flags - i.e.
- *                     IFF_EBRIDGE?
- *                     IFF_TX_SKB_SHARING?
- *                     IFF_HSR_MASTER/SLAVE?
- */
+       /* Make sure we recognize frames from ourselves in hsr_rcv() */
+       res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+                                  slave[1]->dev_addr);
+       if (res < 0)
+               return res;
 
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               res = check_slave_ok(slave[i]);
-               if (res)
-                       return res;
-       }
+       spin_lock_init(&hsr->seqnr_lock);
+       /* Overflow soon to find bugs easier: */
+       hsr->sequence_nr = HSR_SEQNR_START;
 
-       hsr_dev->features = slave[0]->features & slave[1]->features;
-       /* Prevent recursive tx locking */
-       hsr_dev->features |= NETIF_F_LLTX;
-       /* VLAN on top of HSR needs testing and probably some work on
-        * hsr_header_create() etc.
-        */
-       hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
+       init_timer(&hsr->announce_timer);
+       hsr->announce_timer.function = hsr_announce;
+       hsr->announce_timer.data = (unsigned long) hsr;
 
-       /* Set hsr_dev's MAC address to that of mac_slave1 */
-       ether_addr_copy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr);
+       init_timer(&hsr->prune_timer);
+       hsr->prune_timer.function = hsr_prune_nodes;
+       hsr->prune_timer.data = (unsigned long) hsr;
 
-       /* Set required header length */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (slave[i]->hard_header_len + HSR_TAGLEN >
-                                               hsr_dev->hard_header_len)
-                       hsr_dev->hard_header_len =
-                                       slave[i]->hard_header_len + HSR_TAGLEN;
-       }
+       ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
+       hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
-       /* MTU */
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
-               if (slave[i]->mtu - HSR_TAGLEN < hsr_dev->mtu)
-                       hsr_dev->mtu = slave[i]->mtu - HSR_TAGLEN;
+       /* FIXME: should I modify the value of these?
+        *
+        * - hsr_dev->flags - i.e.
+        *                      IFF_MASTER/SLAVE?
+        * - hsr_dev->priv_flags - i.e.
+        *                      IFF_EBRIDGE?
+        *                      IFF_TX_SKB_SHARING?
+        *                      IFF_HSR_MASTER/SLAVE?
+        */
 
        /* Make sure the 1st call to netif_carrier_on() gets through */
        netif_carrier_off(hsr_dev);
 
-       /* Promiscuity */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               res = dev_set_promiscuity(slave[i], 1);
-               if (res) {
-                       netdev_info(hsr_dev, "Cannot set slave promiscuity (%s, %d)\n",
-                                   slave[i]->name, res);
-                       goto fail;
-               }
-       }
+       res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+       if (res)
+               return res;
 
-       /* Make sure we recognize frames from ourselves in hsr_rcv() */
-       res = hsr_create_self_node(&hsr_priv->self_node_db,
-                                       hsr_dev->dev_addr,
-                                       hsr_priv->slave[1]->dev_addr);
-       if (res < 0)
+       res = register_netdevice(hsr_dev);
+       if (res)
                goto fail;
 
-       res = register_netdevice(hsr_dev);
+       res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
+       if (res)
+               goto fail;
+       res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
        if (res)
                goto fail;
 
-       register_hsr_master(hsr_priv);
+       hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+       add_timer(&hsr->prune_timer);
 
        return 0;
 
 fail:
-       restore_slaves(hsr_dev);
+       hsr_for_each_port(hsr, port)
+               hsr_del_port(port);
+
        return res;
 }
index 2c7148e73914e9b6c5dd96a1ea488edd7402d451..108a5d59d2a6433d27960815fa446bddf0e7a304 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #ifndef __HSR_DEVICE_H
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec);
-void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
-                      struct net_device *slave2);
-void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
-                    struct net_device *slave2);
-void hsr_check_announce(struct net_device *hsr_dev, int old_operstate);
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
-int hsr_get_max_mtu(struct hsr_priv *hsr_priv);
+int hsr_get_max_mtu(struct hsr_priv *hsr);
 
 #endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
new file mode 100644 (file)
index 0000000..7871ed6
--- /dev/null
@@ -0,0 +1,368 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#include "hsr_forward.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+
+
+struct hsr_node;
+
+struct hsr_frame_info {
+       struct sk_buff *skb_std;
+       struct sk_buff *skb_hsr;
+       struct hsr_port *port_rcv;
+       struct hsr_node *node_src;
+       u16 sequence_nr;
+       bool is_supervision;
+       bool is_vlan;
+       bool is_local_dest;
+       bool is_local_exclusive;
+};
+
+
+/* The uses I can see for these HSR supervision frames are:
+ * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
+ *    22") to reset any sequence_nr counters belonging to that node. Useful if
+ *    the other node's counter has been reset for some reason.
+ *    --
+ *    Or not - resetting the counter and bridging the frame would create a
+ *    loop, unfortunately.
+ *
+ * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
+ *    frame is received from a particular node, we know something is wrong.
+ *    We just register these (as with normal frames) and throw them away.
+ *
+ * 3) Allow different MAC addresses for the two slave interfaces, using the
+ *    MacAddressA field.
+ */
+static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
+{
+       struct hsr_ethhdr_sp *hdr;
+
+       WARN_ON_ONCE(!skb_mac_header_was_set(skb));
+       hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+
+       if (!ether_addr_equal(hdr->ethhdr.h_dest,
+                             hsr->sup_multicast_addr))
+               return false;
+
+       if (get_hsr_stag_path(&hdr->hsr_sup) != 0x0f)
+               return false;
+       if ((hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+           (hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+               return false;
+       if (hdr->hsr_sup.HSR_TLV_Length != 12)
+               return false;
+
+       return true;
+}
+
+
+static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
+                                          struct hsr_frame_info *frame)
+{
+       struct sk_buff *skb;
+       int copylen;
+       unsigned char *dst, *src;
+
+       skb_pull(skb_in, HSR_HLEN);
+       skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
+       skb_push(skb_in, HSR_HLEN);
+       if (skb == NULL)
+               return NULL;
+
+       skb_reset_mac_header(skb);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start -= HSR_HLEN;
+
+       copylen = 2*ETH_ALEN;
+       if (frame->is_vlan)
+               copylen += VLAN_HLEN;
+       src = skb_mac_header(skb_in);
+       dst = skb_mac_header(skb);
+       memcpy(dst, src, copylen);
+
+       skb->protocol = eth_hdr(skb)->h_proto;
+       return skb;
+}
+
+static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
+                                             struct hsr_port *port)
+{
+       if (!frame->skb_std)
+               frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
+       return skb_clone(frame->skb_std, GFP_ATOMIC);
+}
+
+
+static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
+                        struct hsr_port *port)
+{
+       struct hsr_ethhdr *hsr_ethhdr;
+       int lane_id;
+       int lsdu_size;
+
+       if (port->type == HSR_PT_SLAVE_A)
+               lane_id = 0;
+       else
+               lane_id = 1;
+
+       lsdu_size = skb->len - 14;
+       if (frame->is_vlan)
+               lsdu_size -= 4;
+
+       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+
+       set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
+       set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
+       hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
+       hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
+       hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+}
+
+static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
+                                        struct hsr_frame_info *frame,
+                                        struct hsr_port *port)
+{
+       int movelen;
+       unsigned char *dst, *src;
+       struct sk_buff *skb;
+
+       /* Create the new skb with enough headroom to fit the HSR tag */
+       skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
+       if (skb == NULL)
+               return NULL;
+       skb_reset_mac_header(skb);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += HSR_HLEN;
+
+       movelen = ETH_HLEN;
+       if (frame->is_vlan)
+               movelen += VLAN_HLEN;
+
+       src = skb_mac_header(skb);
+       dst = skb_push(skb, HSR_HLEN);
+       memmove(dst, src, movelen);
+       skb_reset_mac_header(skb);
+
+       hsr_fill_tag(skb, frame, port);
+
+       return skb;
+}
+
+/* If the original frame was an HSR tagged frame, just clone it to be sent
+ * unchanged. Otherwise, create a private frame especially tagged for 'port'.
+ */
+static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
+                                           struct hsr_port *port)
+{
+       if (frame->skb_hsr)
+               return skb_clone(frame->skb_hsr, GFP_ATOMIC);
+
+       if ((port->type != HSR_PT_SLAVE_A) && (port->type != HSR_PT_SLAVE_B)) {
+               WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
+               return NULL;
+       }
+
+       return create_tagged_skb(frame->skb_std, frame, port);
+}
+
+
+static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+                              struct hsr_node *node_src)
+{
+       bool was_multicast_frame;
+       int res;
+
+       was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
+       hsr_addr_subst_source(node_src, skb);
+       skb_pull(skb, ETH_HLEN);
+       res = netif_rx(skb);
+       if (res == NET_RX_DROP) {
+               dev->stats.rx_dropped++;
+       } else {
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += skb->len;
+               if (was_multicast_frame)
+                       dev->stats.multicast++;
+       }
+}
+
+static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
+                   struct hsr_frame_info *frame)
+{
+       if (frame->port_rcv->type == HSR_PT_MASTER) {
+               hsr_addr_subst_dest(frame->node_src, skb, port);
+
+               /* Address substitution (IEC62439-3 pp 26, 50): replace mac
+                * address of outgoing frame with that of the outgoing slave's.
+                */
+               ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
+       }
+       return dev_queue_xmit(skb);
+}
+
+
+/* Forward the frame through all devices except:
+ * - Back through the receiving device
+ * - If it's a HSR frame: through a device where it has passed before
+ * - To the local HSR master only if the frame is directly addressed to it, or
+ *   a non-supervision multicast or broadcast frame.
+ *
+ * HSR slave devices should insert a HSR tag into the frame, or forward the
+ * frame unchanged if it's already tagged. Interlink devices should strip HSR
+ * tags if they're of the non-HSR type (but only after duplicate discard). The
+ * master device always strips HSR tags.
+ */
+static void hsr_forward_do(struct hsr_frame_info *frame)
+{
+       struct hsr_port *port;
+       struct sk_buff *skb;
+
+       hsr_for_each_port(frame->port_rcv->hsr, port) {
+               /* Don't send frame back the way it came */
+               if (port == frame->port_rcv)
+                       continue;
+
+               /* Don't deliver locally unless we should */
+               if ((port->type == HSR_PT_MASTER) && !frame->is_local_dest)
+                       continue;
+
+               /* Deliver frames directly addressed to us to master only */
+               if ((port->type != HSR_PT_MASTER) && frame->is_local_exclusive)
+                       continue;
+
+               /* Don't send frame over port where it has been sent before */
+               if (hsr_register_frame_out(port, frame->node_src,
+                                          frame->sequence_nr))
+                       continue;
+
+               if (frame->is_supervision && (port->type == HSR_PT_MASTER)) {
+                       hsr_handle_sup_frame(frame->skb_hsr,
+                                            frame->node_src,
+                                            frame->port_rcv);
+                       continue;
+               }
+
+               if (port->type != HSR_PT_MASTER)
+                       skb = frame_get_tagged_skb(frame, port);
+               else
+                       skb = frame_get_stripped_skb(frame, port);
+               if (skb == NULL) {
+                       /* FIXME: Record the dropped frame? */
+                       continue;
+               }
+
+               skb->dev = port->dev;
+               if (port->type == HSR_PT_MASTER)
+                       hsr_deliver_master(skb, port->dev, frame->node_src);
+               else
+                       hsr_xmit(skb, port, frame);
+       }
+}
+
+
+static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
+                            struct hsr_frame_info *frame)
+{
+       struct net_device *master_dev;
+
+       master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
+
+       if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
+               frame->is_local_exclusive = true;
+               skb->pkt_type = PACKET_HOST;
+       } else {
+               frame->is_local_exclusive = false;
+       }
+
+       if ((skb->pkt_type == PACKET_HOST) ||
+           (skb->pkt_type == PACKET_MULTICAST) ||
+           (skb->pkt_type == PACKET_BROADCAST)) {
+               frame->is_local_dest = true;
+       } else {
+               frame->is_local_dest = false;
+       }
+}
+
+
+static int hsr_fill_frame_info(struct hsr_frame_info *frame,
+                              struct sk_buff *skb, struct hsr_port *port)
+{
+       struct ethhdr *ethhdr;
+       unsigned long irqflags;
+
+       frame->is_supervision = is_supervision_frame(port->hsr, skb);
+       frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
+                                      frame->is_supervision);
+       if (frame->node_src == NULL)
+               return -1; /* Unknown node and !is_supervision, or no mem */
+
+       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       frame->is_vlan = false;
+       if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
+               frame->is_vlan = true;
+               /* FIXME: */
+               WARN_ONCE(1, "HSR: VLAN not yet supported");
+       }
+       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+               frame->skb_std = NULL;
+               frame->skb_hsr = skb;
+               frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+       } else {
+               frame->skb_std = skb;
+               frame->skb_hsr = NULL;
+               /* Sequence nr for the master node */
+               spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
+               frame->sequence_nr = port->hsr->sequence_nr;
+               port->hsr->sequence_nr++;
+               spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
+       }
+
+       frame->port_rcv = port;
+       check_local_dest(port->hsr, skb, frame);
+
+       return 0;
+}
+
+/* Must be called holding rcu read lock (because of the port parameter) */
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+{
+       struct hsr_frame_info frame;
+
+       if (skb_mac_header(skb) != skb->data) {
+               WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
+                         __FILE__, __LINE__, port->dev->name);
+               goto out_drop;
+       }
+
+       if (hsr_fill_frame_info(&frame, skb, port) < 0)
+               goto out_drop;
+       hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
+       hsr_forward_do(&frame);
+
+       if (frame.skb_hsr != NULL)
+               kfree_skb(frame.skb_hsr);
+       if (frame.skb_std != NULL)
+               kfree_skb(frame.skb_std);
+       return;
+
+out_drop:
+       port->dev->stats.tx_dropped++;
+       kfree_skb(skb);
+}
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
new file mode 100644 (file)
index 0000000..5c5bc4b
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#ifndef __HSR_FORWARD_H
+#define __HSR_FORWARD_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port);
+
+#endif /* __HSR_FORWARD_H */
index 83e58449366a92bcc4e349241337914bca8dcd10..bace124d14ef570375b96fbde362b2f6f059f630 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * The HSR spec says never to forward the same frame twice on the same
  * interface. A frame is identified by its source MAC address and its HSR
 #include "hsr_netlink.h"
 
 
-struct node_entry {
-       struct list_head mac_list;
-       unsigned char   MacAddressA[ETH_ALEN];
-       unsigned char   MacAddressB[ETH_ALEN];
-       enum hsr_dev_idx   AddrB_if;    /* The local slave through which AddrB
-                                        * frames are received from this node
-                                        */
-       unsigned long   time_in[HSR_MAX_SLAVE];
-       bool            time_in_stale[HSR_MAX_SLAVE];
-       u16             seq_out[HSR_MAX_DEV];
-       struct rcu_head rcu_head;
+struct hsr_node {
+       struct list_head        mac_list;
+       unsigned char           MacAddressA[ETH_ALEN];
+       unsigned char           MacAddressB[ETH_ALEN];
+       /* Local slave through which AddrB frames are received from this node */
+       enum hsr_port_type      AddrB_port;
+       unsigned long           time_in[HSR_PT_PORTS];
+       bool                    time_in_stale[HSR_PT_PORTS];
+       u16                     seq_out[HSR_PT_PORTS];
+       struct rcu_head         rcu_head;
 };
 
-/*     TODO: use hash lists for mac addresses (linux/jhash.h)?    */
 
+/*     TODO: use hash lists for mac addresses (linux/jhash.h)?    */
 
 
-/* Search for mac entry. Caller must hold rcu read lock.
+/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
  */
-static struct node_entry *find_node_by_AddrA(struct list_head *node_db,
-                                            const unsigned char addr[ETH_ALEN])
+static bool seq_nr_after(u16 a, u16 b)
 {
-       struct node_entry *node;
-
-       list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, addr))
-                       return node;
-       }
+       /* Remove inconsistency where
+        * seq_nr_after(a, b) == seq_nr_before(a, b)
+        */
+       if ((int) b - a == 32768)
+               return false;
 
-       return NULL;
+       return (((s16) (b - a)) < 0);
 }
+#define seq_nr_before(a, b)            seq_nr_after((b), (a))
+#define seq_nr_after_or_eq(a, b)       (!seq_nr_before((a), (b)))
+#define seq_nr_before_or_eq(a, b)      (!seq_nr_after((a), (b)))
 
 
-/* Search for mac entry. Caller must hold rcu read lock.
- */
-static struct node_entry *find_node_by_AddrB(struct list_head *node_db,
-                                            const unsigned char addr[ETH_ALEN])
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 {
-       struct node_entry *node;
+       struct hsr_node *node;
 
-       list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressB, addr))
-                       return node;
+       node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
+                                     mac_list);
+       if (!node) {
+               WARN_ONCE(1, "HSR: No self node\n");
+               return false;
        }
 
-       return NULL;
-}
+       if (ether_addr_equal(addr, node->MacAddressA))
+               return true;
+       if (ether_addr_equal(addr, node->MacAddressB))
+               return true;
 
+       return false;
+}
 
 /* Search for mac entry. Caller must hold rcu read lock.
  */
-struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb)
+static struct hsr_node *find_node_by_AddrA(struct list_head *node_db,
+                                          const unsigned char addr[ETH_ALEN])
 {
-       struct node_entry *node;
-       struct ethhdr *ethhdr;
-
-       if (!skb_mac_header_was_set(skb))
-               return NULL;
-
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       struct hsr_node *node;
 
        list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
-                       return node;
-               if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+               if (ether_addr_equal(node->MacAddressA, addr))
                        return node;
        }
 
@@ -102,7 +99,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN])
 {
-       struct node_entry *node, *oldnode;
+       struct hsr_node *node, *oldnode;
 
        node = kmalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
@@ -113,7 +110,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
 
        rcu_read_lock();
        oldnode = list_first_or_null_rcu(self_node_db,
-                                               struct node_entry, mac_list);
+                                               struct hsr_node, mac_list);
        if (oldnode) {
                list_replace_rcu(&oldnode->mac_list, &node->mac_list);
                rcu_read_unlock();
@@ -128,135 +125,144 @@ int hsr_create_self_node(struct list_head *self_node_db,
 }
 
 
-/* Add/merge node to the database of nodes. 'skb' must contain an HSR
- * supervision frame.
- * - If the supervision header's MacAddressA field is not yet in the database,
- * this frame is from an hitherto unknown node - add it to the database.
- * - If the sender's MAC address is not the same as its MacAddressA address,
- * the node is using PICS_SUBS (address substitution). Record the sender's
- * address as the node's MacAddressB.
- *
- * This function needs to work even if the sender node has changed one of its
- * slaves' MAC addresses. In this case, there are four different cases described
- * by (Addr-changed, received-from) pairs as follows. Note that changing the
- * SlaveA address is equal to changing the node's own address:
- *
- * - (AddrB, SlaveB): The new AddrB will be recorded by PICS_SUBS code since
- *                   node == NULL.
- * - (AddrB, SlaveA): Will work as usual (the AddrB change won't be detected
- *                   from this frame).
- *
- * - (AddrA, SlaveB): The old node will be found. We need to detect this and
- *                   remove the node.
- * - (AddrA, SlaveA): A new node will be registered (non-PICS_SUBS at first).
- *                   The old one will be pruned after HSR_NODE_FORGET_TIME.
- *
- * We also need to detect if the sender's SlaveA and SlaveB cables have been
- * swapped.
+/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+ * originating from the newly added node.
  */
-struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
-                                 struct node_entry *node,
-                                 struct sk_buff *skb,
-                                 enum hsr_dev_idx dev_idx)
+struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+                             u16 seq_out)
 {
-       struct hsr_sup_payload *hsr_sp;
-       struct hsr_ethhdr_sp *hsr_ethsup;
-       int i;
+       struct hsr_node *node;
        unsigned long now;
-
-       hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
-       hsr_sp = (struct hsr_sup_payload *) skb->data;
-
-       if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) {
-               /* Node has changed its AddrA, frame was received from SlaveB */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node && (dev_idx == node->AddrB_if) &&
-           !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) {
-               /* Cables have been swapped */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node && (dev_idx != node->AddrB_if) &&
-           (node->AddrB_if != HSR_DEV_NONE) &&
-           !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) {
-               /* Cables have been swapped */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node)
-               return node;
-
-       node = find_node_by_AddrA(&hsr_priv->node_db, hsr_sp->MacAddressA);
-       if (node) {
-               /* Node is known, but frame was received from an unknown
-                * address. Node is PICS_SUBS capable; merge its AddrB.
-                */
-               ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
-               node->AddrB_if = dev_idx;
-               return node;
-       }
+       int i;
 
        node = kzalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return NULL;
 
-       ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA);
-       ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
-       if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
-               node->AddrB_if = dev_idx;
-       else
-               node->AddrB_if = HSR_DEV_NONE;
+       ether_addr_copy(node->MacAddressA, addr);
 
        /* We are only interested in time diffs here, so use current jiffies
         * as initialization. (0 could trigger an spurious ring error warning).
         */
        now = jiffies;
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
+       for (i = 0; i < HSR_PT_PORTS; i++)
                node->time_in[i] = now;
-       for (i = 0; i < HSR_MAX_DEV; i++)
-               node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1;
+       for (i = 0; i < HSR_PT_PORTS; i++)
+               node->seq_out[i] = seq_out;
 
-       list_add_tail_rcu(&node->mac_list, &hsr_priv->node_db);
+       list_add_tail_rcu(&node->mac_list, node_db);
 
        return node;
 }
 
+/* Get the hsr_node from which 'skb' was sent.
+ */
+struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+                             bool is_sup)
+{
+       struct hsr_node *node;
+       struct ethhdr *ethhdr;
+       u16 seq_out;
+
+       if (!skb_mac_header_was_set(skb))
+               return NULL;
+
+       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+       list_for_each_entry_rcu(node, node_db, mac_list) {
+               if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
+                       return node;
+               if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+                       return node;
+       }
+
+       if (!is_sup)
+               return NULL; /* Only supervision frame may create node entry */
+
+       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+               /* Use the existing sequence_nr from the tag as starting point
+                * for filtering duplicate frames.
+                */
+               seq_out = hsr_get_skb_sequence_nr(skb) - 1;
+       } else {
+               WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+               seq_out = 0;
+       }
+
+       return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+}
+
+/* Use the Supervision frame's info about an eventual MacAddressB for merging
+ * nodes that has previously had their MacAddressB registered as a separate
+ * node.
+ */
+void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
+                         struct hsr_port *port_rcv)
+{
+       struct hsr_node *node_real;
+       struct hsr_sup_payload *hsr_sp;
+       struct list_head *node_db;
+       int i;
+
+       skb_pull(skb, sizeof(struct hsr_ethhdr_sp));
+       hsr_sp = (struct hsr_sup_payload *) skb->data;
+
+       if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA))
+               /* Not sent from MacAddressB of a PICS_SUBS capable node */
+               goto done;
+
+       /* Merge node_curr (registered on MacAddressB) into node_real */
+       node_db = &port_rcv->hsr->node_db;
+       node_real = find_node_by_AddrA(node_db, hsr_sp->MacAddressA);
+       if (!node_real)
+               /* No frame received from AddrA of this node yet */
+               node_real = hsr_add_node(node_db, hsr_sp->MacAddressA,
+                                        HSR_SEQNR_START - 1);
+       if (!node_real)
+               goto done; /* No mem */
+       if (node_real == node_curr)
+               /* Node has already been merged */
+               goto done;
+
+       ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source);
+       for (i = 0; i < HSR_PT_PORTS; i++) {
+               if (!node_curr->time_in_stale[i] &&
+                   time_after(node_curr->time_in[i], node_real->time_in[i])) {
+                       node_real->time_in[i] = node_curr->time_in[i];
+                       node_real->time_in_stale[i] = node_curr->time_in_stale[i];
+               }
+               if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
+                       node_real->seq_out[i] = node_curr->seq_out[i];
+       }
+       node_real->AddrB_port = port_rcv->type;
+
+       list_del_rcu(&node_curr->mac_list);
+       kfree_rcu(node_curr, rcu_head);
+
+done:
+       skb_push(skb, sizeof(struct hsr_ethhdr_sp));
+}
+
 
 /* 'skb' is a frame meant for this host, that is to be passed to upper layers.
  *
- * If the frame was sent by a node's B interface, replace the sender
+ * If the frame was sent by a node's B interface, replace the source
  * address with that node's "official" address (MacAddressA) so that upper
  * layers recognize where it came from.
  */
-void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
 {
-       struct ethhdr *ethhdr;
-       struct node_entry *node;
-
        if (!skb_mac_header_was_set(skb)) {
                WARN_ONCE(1, "%s: Mac header not set\n", __func__);
                return;
        }
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
-       rcu_read_lock();
-       node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
-       if (node)
-               ether_addr_copy(ethhdr->h_source, node->MacAddressA);
-       rcu_read_unlock();
+       memcpy(&eth_hdr(skb)->h_source, node->MacAddressA, ETH_ALEN);
 }
 
-
 /* 'skb' is a frame meant for another host.
- * 'hsr_dev_idx' is the HSR index of the outgoing device
+ * 'port' is the outgoing interface
  *
  * Substitute the target (dest) MAC address if necessary, so the it matches the
  * recipient interface MAC address, regardless of whether that is the
@@ -264,47 +270,44 @@ void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
  * This is needed to keep the packets flowing through switches that learn on
  * which "side" the different interfaces are.
  */
-void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
-                        enum hsr_dev_idx dev_idx)
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+                        struct hsr_port *port)
 {
-       struct node_entry *node;
+       struct hsr_node *node_dst;
 
-       rcu_read_lock();
-       node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
-       if (node && (node->AddrB_if == dev_idx))
-               ether_addr_copy(ethhdr->h_dest, node->MacAddressB);
-       rcu_read_unlock();
-}
+       if (!skb_mac_header_was_set(skb)) {
+               WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+               return;
+       }
 
+       if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
+               return;
 
-/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
- * false otherwise.
- */
-static bool seq_nr_after(u16 a, u16 b)
-{
-       /* Remove inconsistency where
-        * seq_nr_after(a, b) == seq_nr_before(a, b)
-        */
-       if ((int) b - a == 32768)
-               return false;
+       node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
+       if (!node_dst) {
+               WARN_ONCE(1, "%s: Unknown node\n", __func__);
+               return;
+       }
+       if (port->type != node_dst->AddrB_port)
+               return;
 
-       return (((s16) (b - a)) < 0);
+       ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->MacAddressB);
 }
-#define seq_nr_before(a, b)            seq_nr_after((b), (a))
-#define seq_nr_after_or_eq(a, b)       (!seq_nr_before((a), (b)))
-#define seq_nr_before_or_eq(a, b)      (!seq_nr_after((a), (b)))
 
 
-void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+                          u16 sequence_nr)
 {
-       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) {
-               WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+       /* Don't register incoming frames without a valid sequence number. This
+        * ensures entries of restarted nodes gets pruned so that they can
+        * re-register and resume communications.
+        */
+       if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
                return;
-       }
-       node->time_in[dev_idx] = jiffies;
-       node->time_in_stale[dev_idx] = false;
-}
 
+       node->time_in[port->type] = jiffies;
+       node->time_in_stale[port->type] = false;
+}
 
 /* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
  * ethhdr->h_source address and skb->mac_header set.
@@ -314,102 +317,87 @@ void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
  *      0 otherwise, or
  *      negative error code on error
  */
-int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
-                          struct sk_buff *skb)
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+                          u16 sequence_nr)
 {
-       struct hsr_ethhdr *hsr_ethhdr;
-       u16 sequence_nr;
-
-       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
-               WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
-               return -EINVAL;
-       }
-       if (!skb_mac_header_was_set(skb)) {
-               WARN_ONCE(1, "%s: Mac header not set\n", __func__);
-               return -EINVAL;
-       }
-       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
-
-       sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
-       if (seq_nr_before_or_eq(sequence_nr, node->seq_out[dev_idx]))
+       if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]))
                return 1;
 
-       node->seq_out[dev_idx] = sequence_nr;
+       node->seq_out[port->type] = sequence_nr;
        return 0;
 }
 
 
-
-static bool is_late(struct node_entry *node, enum hsr_dev_idx dev_idx)
+static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+                                     struct hsr_node *node)
 {
-       enum hsr_dev_idx other;
-
-       if (node->time_in_stale[dev_idx])
-               return true;
-
-       if (dev_idx == HSR_DEV_SLAVE_A)
-               other = HSR_DEV_SLAVE_B;
-       else
-               other = HSR_DEV_SLAVE_A;
-
-       if (node->time_in_stale[other])
-               return false;
+       if (node->time_in_stale[HSR_PT_SLAVE_A])
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (node->time_in_stale[HSR_PT_SLAVE_B])
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+
+       if (time_after(node->time_in[HSR_PT_SLAVE_B],
+                      node->time_in[HSR_PT_SLAVE_A] +
+                                       msecs_to_jiffies(MAX_SLAVE_DIFF)))
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (time_after(node->time_in[HSR_PT_SLAVE_A],
+                      node->time_in[HSR_PT_SLAVE_B] +
+                                       msecs_to_jiffies(MAX_SLAVE_DIFF)))
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
 
-       if (time_after(node->time_in[other], node->time_in[dev_idx] +
-                      msecs_to_jiffies(MAX_SLAVE_DIFF)))
-               return true;
-
-       return false;
+       return NULL;
 }
 
 
 /* Remove stale sequence_nr records. Called by timer every
  * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
  */
-void hsr_prune_nodes(struct hsr_priv *hsr_priv)
+void hsr_prune_nodes(unsigned long data)
 {
-       struct node_entry *node;
+       struct hsr_priv *hsr;
+       struct hsr_node *node;
+       struct hsr_port *port;
        unsigned long timestamp;
        unsigned long time_a, time_b;
 
+       hsr = (struct hsr_priv *) data;
+
        rcu_read_lock();
-       list_for_each_entry_rcu(node, &hsr_priv->node_db, mac_list) {
+       list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
                /* Shorthand */
-               time_a = node->time_in[HSR_DEV_SLAVE_A];
-               time_b = node->time_in[HSR_DEV_SLAVE_B];
+               time_a = node->time_in[HSR_PT_SLAVE_A];
+               time_b = node->time_in[HSR_PT_SLAVE_B];
 
                /* Check for timestamps old enough to risk wrap-around */
                if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
-                       node->time_in_stale[HSR_DEV_SLAVE_A] = true;
+                       node->time_in_stale[HSR_PT_SLAVE_A] = true;
                if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
-                       node->time_in_stale[HSR_DEV_SLAVE_B] = true;
+                       node->time_in_stale[HSR_PT_SLAVE_B] = true;
 
                /* Get age of newest frame from node.
                 * At least one time_in is OK here; nodes get pruned long
                 * before both time_ins can get stale
                 */
                timestamp = time_a;
-               if (node->time_in_stale[HSR_DEV_SLAVE_A] ||
-                   (!node->time_in_stale[HSR_DEV_SLAVE_B] &&
+               if (node->time_in_stale[HSR_PT_SLAVE_A] ||
+                   (!node->time_in_stale[HSR_PT_SLAVE_B] &&
                    time_after(time_b, time_a)))
                        timestamp = time_b;
 
                /* Warn of ring error only as long as we get frames at all */
                if (time_is_after_jiffies(timestamp +
                                        msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
-
-                       if (is_late(node, HSR_DEV_SLAVE_A))
-                               hsr_nl_ringerror(hsr_priv, node->MacAddressA,
-                                                HSR_DEV_SLAVE_A);
-                       else if (is_late(node, HSR_DEV_SLAVE_B))
-                               hsr_nl_ringerror(hsr_priv, node->MacAddressA,
-                                                HSR_DEV_SLAVE_B);
+                       rcu_read_lock();
+                       port = get_late_port(hsr, node);
+                       if (port != NULL)
+                               hsr_nl_ringerror(hsr, node->MacAddressA, port);
+                       rcu_read_unlock();
                }
 
                /* Prune old entries */
                if (time_is_before_jiffies(timestamp +
                                        msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
-                       hsr_nl_nodedown(hsr_priv, node->MacAddressA);
+                       hsr_nl_nodedown(hsr, node->MacAddressA);
                        list_del_rcu(&node->mac_list);
                        /* Note that we need to free this entry later: */
                        kfree_rcu(node, rcu_head);
@@ -419,21 +407,21 @@ void hsr_prune_nodes(struct hsr_priv *hsr_priv)
 }
 
 
-void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                        unsigned char addr[ETH_ALEN])
 {
-       struct node_entry *node;
+       struct hsr_node *node;
 
        if (!_pos) {
-               node = list_first_or_null_rcu(&hsr_priv->node_db,
-                                               struct node_entry, mac_list);
+               node = list_first_or_null_rcu(&hsr->node_db,
+                                             struct hsr_node, mac_list);
                if (node)
                        ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
 
        node = _pos;
-       list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
+       list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
                ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
@@ -442,7 +430,7 @@ void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
 }
 
 
-int hsr_get_node_data(struct hsr_priv *hsr_priv,
+int hsr_get_node_data(struct hsr_priv *hsr,
                      const unsigned char *addr,
                      unsigned char addr_b[ETH_ALEN],
                      unsigned int *addr_b_ifindex,
@@ -451,12 +439,13 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                      int *if2_age,
                      u16 *if2_seq)
 {
-       struct node_entry *node;
+       struct hsr_node *node;
+       struct hsr_port *port;
        unsigned long tdiff;
 
 
        rcu_read_lock();
-       node = find_node_by_AddrA(&hsr_priv->node_db, addr);
+       node = find_node_by_AddrA(&hsr->node_db, addr);
        if (!node) {
                rcu_read_unlock();
                return -ENOENT; /* No such entry */
@@ -464,8 +453,8 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
 
        ether_addr_copy(addr_b, node->MacAddressB);
 
-       tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
-       if (node->time_in_stale[HSR_DEV_SLAVE_A])
+       tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
+       if (node->time_in_stale[HSR_PT_SLAVE_A])
                *if1_age = INT_MAX;
 #if HZ <= MSEC_PER_SEC
        else if (tdiff > msecs_to_jiffies(INT_MAX))
@@ -474,8 +463,8 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
        else
                *if1_age = jiffies_to_msecs(tdiff);
 
-       tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_B];
-       if (node->time_in_stale[HSR_DEV_SLAVE_B])
+       tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
+       if (node->time_in_stale[HSR_PT_SLAVE_B])
                *if2_age = INT_MAX;
 #if HZ <= MSEC_PER_SEC
        else if (tdiff > msecs_to_jiffies(INT_MAX))
@@ -485,13 +474,15 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                *if2_age = jiffies_to_msecs(tdiff);
 
        /* Present sequence numbers as if they were incoming on interface */
-       *if1_seq = node->seq_out[HSR_DEV_SLAVE_B];
-       *if2_seq = node->seq_out[HSR_DEV_SLAVE_A];
+       *if1_seq = node->seq_out[HSR_PT_SLAVE_B];
+       *if2_seq = node->seq_out[HSR_PT_SLAVE_A];
 
-       if ((node->AddrB_if != HSR_DEV_NONE) && hsr_priv->slave[node->AddrB_if])
-               *addr_b_ifindex = hsr_priv->slave[node->AddrB_if]->ifindex;
-       else
+       if (node->AddrB_port != HSR_PT_NONE) {
+               port = hsr_port_get_hsr(hsr, node->AddrB_port);
+               *addr_b_ifindex = port->dev->ifindex;
+       } else {
                *addr_b_ifindex = -1;
+       }
 
        rcu_read_unlock();
 
index e6c4022030ad17f74d6007d44d9fc9231ea01fac..438b40f98f5a986e50180c351d55c9dbb0b66977 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,42 +6,43 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
-#ifndef _HSR_FRAMEREG_H
-#define _HSR_FRAMEREG_H
+#ifndef __HSR_FRAMEREG_H
+#define __HSR_FRAMEREG_H
 
 #include "hsr_main.h"
 
-struct node_entry;
+struct hsr_node;
 
-struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb);
+struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+                             u16 seq_out);
+struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+                             bool is_sup);
+void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
+                         struct hsr_port *port);
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
 
-struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
-                                 struct node_entry *node,
-                                 struct sk_buff *skb,
-                                 enum hsr_dev_idx dev_idx);
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+                        struct hsr_port *port);
 
-void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb);
-void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
-                        enum hsr_dev_idx dev_idx);
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+                          u16 sequence_nr);
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+                          u16 sequence_nr);
 
-void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx);
-
-int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
-                          struct sk_buff *skb);
-
-void hsr_prune_nodes(struct hsr_priv *hsr_priv);
+void hsr_prune_nodes(unsigned long data);
 
 int hsr_create_self_node(struct list_head *self_node_db,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN]);
 
-void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                        unsigned char addr[ETH_ALEN]);
 
-int hsr_get_node_data(struct hsr_priv *hsr_priv,
+int hsr_get_node_data(struct hsr_priv *hsr,
                      const unsigned char *addr,
                      unsigned char addr_b[ETH_ALEN],
                      unsigned int *addr_b_ifindex,
@@ -50,4 +51,4 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                      int *if2_age,
                      u16 *if2_seq);
 
-#endif /* _HSR_FRAMEREG_H */
+#endif /* __HSR_FRAMEREG_H */
index 3fee5218a691f20c8b028e7ed2738febc7dfd0b7..779d28b65417a6e62b687d8f5ea36d6be285f417 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,11 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
- *
- * In addition to routines for registering and unregistering HSR support, this
- * file also contains the receive routine that handles all incoming frames with
- * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #include <linux/netdevice.h>
 #include "hsr_device.h"
 #include "hsr_netlink.h"
 #include "hsr_framereg.h"
-
-
-/* List of all registered virtual HSR devices */
-static LIST_HEAD(hsr_list);
-
-void register_hsr_master(struct hsr_priv *hsr_priv)
-{
-       list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list);
-}
-
-void unregister_hsr_master(struct hsr_priv *hsr_priv)
-{
-       struct hsr_priv *hsr_priv_it;
-
-       list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list)
-               if (hsr_priv_it == hsr_priv) {
-                       list_del_rcu(&hsr_priv_it->hsr_list);
-                       return;
-               }
-}
-
-bool is_hsr_slave(struct net_device *dev)
-{
-       struct hsr_priv *hsr_priv_it;
-
-       list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) {
-               if (dev == hsr_priv_it->slave[0])
-                       return true;
-               if (dev == hsr_priv_it->slave[1])
-                       return true;
-       }
-
-       return false;
-}
-
-
-/* If dev is a HSR slave device, return the virtual master device. Return NULL
- * otherwise.
- */
-static struct hsr_priv *get_hsr_master(struct net_device *dev)
-{
-       struct hsr_priv *hsr_priv;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
-               if ((dev == hsr_priv->slave[0]) ||
-                   (dev == hsr_priv->slave[1])) {
-                       rcu_read_unlock();
-                       return hsr_priv;
-               }
-
-       rcu_read_unlock();
-       return NULL;
-}
-
-
-/* If dev is a HSR slave device, return the other slave device. Return NULL
- * otherwise.
- */
-static struct net_device *get_other_slave(struct hsr_priv *hsr_priv,
-                                         struct net_device *dev)
-{
-       if (dev == hsr_priv->slave[0])
-               return hsr_priv->slave[1];
-       if (dev == hsr_priv->slave[1])
-               return hsr_priv->slave[0];
-
-       return NULL;
-}
+#include "hsr_slave.h"
 
 
 static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                             void *ptr)
 {
-       struct net_device *slave, *other_slave;
-       struct hsr_priv *hsr_priv;
-       int old_operstate;
+       struct net_device *dev;
+       struct hsr_port *port, *master;
+       struct hsr_priv *hsr;
        int mtu_max;
        int res;
-       struct net_device *dev;
 
        dev = netdev_notifier_info_to_dev(ptr);
-
-       hsr_priv = get_hsr_master(dev);
-       if (hsr_priv) {
-               /* dev is a slave device */
-               slave = dev;
-               other_slave = get_other_slave(hsr_priv, slave);
-       } else {
+       port = hsr_port_get_rtnl(dev);
+       if (port == NULL) {
                if (!is_hsr_master(dev))
-                       return NOTIFY_DONE;
-               hsr_priv = netdev_priv(dev);
-               slave = hsr_priv->slave[0];
-               other_slave = hsr_priv->slave[1];
+                       return NOTIFY_DONE;     /* Not an HSR device */
+               hsr = netdev_priv(dev);
+               port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       } else {
+               hsr = port->hsr;
        }
 
        switch (event) {
        case NETDEV_UP:         /* Administrative state DOWN */
        case NETDEV_DOWN:       /* Administrative state UP */
        case NETDEV_CHANGE:     /* Link (carrier) state changes */
-               old_operstate = hsr_priv->dev->operstate;
-               hsr_set_carrier(hsr_priv->dev, slave, other_slave);
-               /* netif_stacked_transfer_operstate() cannot be used here since
-                * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
-                */
-               hsr_set_operstate(hsr_priv->dev, slave, other_slave);
-               hsr_check_announce(hsr_priv->dev, old_operstate);
+               hsr_check_carrier_and_operstate(hsr);
                break;
        case NETDEV_CHANGEADDR:
-
-               /* This should not happen since there's no ndo_set_mac_address()
-                * for HSR devices - i.e. not supported.
-                */
-               if (dev == hsr_priv->dev)
+               if (port->type == HSR_PT_MASTER) {
+                       /* This should not happen since there's no
+                        * ndo_set_mac_address() for HSR devices - i.e. not
+                        * supported.
+                        */
                        break;
+               }
 
-               if (dev == hsr_priv->slave[0])
-                       ether_addr_copy(hsr_priv->dev->dev_addr,
-                                       hsr_priv->slave[0]->dev_addr);
+               master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+
+               if (port->type == HSR_PT_SLAVE_A) {
+                       ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR, master->dev);
+               }
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
-               res = hsr_create_self_node(&hsr_priv->self_node_db,
-                                          hsr_priv->dev->dev_addr,
-                                          hsr_priv->slave[1] ?
-                                               hsr_priv->slave[1]->dev_addr :
-                                               hsr_priv->dev->dev_addr);
+               port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+               res = hsr_create_self_node(&hsr->self_node_db,
+                                          master->dev->dev_addr,
+                                          port ?
+                                               port->dev->dev_addr :
+                                               master->dev->dev_addr);
                if (res)
-                       netdev_warn(hsr_priv->dev,
+                       netdev_warn(master->dev,
                                    "Could not update HSR node address.\n");
-
-               if (dev == hsr_priv->slave[0])
-                       call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev);
                break;
        case NETDEV_CHANGEMTU:
-               if (dev == hsr_priv->dev)
+               if (port->type == HSR_PT_MASTER)
                        break; /* Handled in ndo_change_mtu() */
-               mtu_max = hsr_get_max_mtu(hsr_priv);
-               if (hsr_priv->dev->mtu > mtu_max)
-                       dev_set_mtu(hsr_priv->dev, mtu_max);
+               mtu_max = hsr_get_max_mtu(port->hsr);
+               master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
+               master->dev->mtu = mtu_max;
                break;
        case NETDEV_UNREGISTER:
-               if (dev == hsr_priv->slave[0])
-                       hsr_priv->slave[0] = NULL;
-               if (dev == hsr_priv->slave[1])
-                       hsr_priv->slave[1] = NULL;
-
-               /* There should really be a way to set a new slave device... */
-
+               hsr_del_port(port);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* HSR works only on Ethernet devices. Refuse slave to change
@@ -181,255 +94,16 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 }
 
 
-static struct timer_list prune_timer;
-
-static void prune_nodes_all(unsigned long data)
-{
-       struct hsr_priv *hsr_priv;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
-               hsr_prune_nodes(hsr_priv);
-       rcu_read_unlock();
-
-       prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
-       add_timer(&prune_timer);
-}
-
-
-static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
 {
-       struct hsr_tag *hsr_tag;
-       struct sk_buff *skb2;
-
-       skb2 = skb_share_check(skb, GFP_ATOMIC);
-       if (unlikely(!skb2))
-               goto err_free;
-       skb = skb2;
-
-       if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN)))
-               goto err_free;
+       struct hsr_port *port;
 
-       hsr_tag = (struct hsr_tag *) skb->data;
-       skb->protocol = hsr_tag->encap_proto;
-       skb_pull(skb, HSR_TAGLEN);
-
-       return skb;
-
-err_free:
-       kfree_skb(skb);
+       hsr_for_each_port(hsr, port)
+               if (port->type == pt)
+                       return port;
        return NULL;
 }
 
-
-/* The uses I can see for these HSR supervision frames are:
- * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
- *    22") to reset any sequence_nr counters belonging to that node. Useful if
- *    the other node's counter has been reset for some reason.
- *    --
- *    Or not - resetting the counter and bridging the frame would create a
- *    loop, unfortunately.
- *
- * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
- *    frame is received from a particular node, we know something is wrong.
- *    We just register these (as with normal frames) and throw them away.
- *
- * 3) Allow different MAC addresses for the two slave interfaces, using the
- *    MacAddressA field.
- */
-static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb)
-{
-       struct hsr_sup_tag *hsr_stag;
-
-       if (!ether_addr_equal(eth_hdr(skb)->h_dest,
-                             hsr_priv->sup_multicast_addr))
-               return false;
-
-       hsr_stag = (struct hsr_sup_tag *) skb->data;
-       if (get_hsr_stag_path(hsr_stag) != 0x0f)
-               return false;
-       if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-           (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
-               return false;
-       if (hsr_stag->HSR_TLV_Length != 12)
-               return false;
-
-       return true;
-}
-
-
-/* Implementation somewhat according to IEC-62439-3, p. 43
- */
-static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev)
-{
-       struct hsr_priv *hsr_priv;
-       struct net_device *other_slave;
-       struct node_entry *node;
-       bool deliver_to_self;
-       struct sk_buff *skb_deliver;
-       enum hsr_dev_idx dev_in_idx, dev_other_idx;
-       bool dup_out;
-       int ret;
-
-       hsr_priv = get_hsr_master(dev);
-
-       if (!hsr_priv) {
-               /* Non-HSR-slave device 'dev' is connected to a HSR network */
-               kfree_skb(skb);
-               dev->stats.rx_errors++;
-               return NET_RX_SUCCESS;
-       }
-
-       if (dev == hsr_priv->slave[0]) {
-               dev_in_idx = HSR_DEV_SLAVE_A;
-               dev_other_idx = HSR_DEV_SLAVE_B;
-       } else {
-               dev_in_idx = HSR_DEV_SLAVE_B;
-               dev_other_idx = HSR_DEV_SLAVE_A;
-       }
-
-       node = hsr_find_node(&hsr_priv->self_node_db, skb);
-       if (node) {
-               /* Always kill frames sent by ourselves */
-               kfree_skb(skb);
-               return NET_RX_SUCCESS;
-       }
-
-       /* Is this frame a candidate for local reception? */
-       deliver_to_self = false;
-       if ((skb->pkt_type == PACKET_HOST) ||
-           (skb->pkt_type == PACKET_MULTICAST) ||
-           (skb->pkt_type == PACKET_BROADCAST))
-               deliver_to_self = true;
-       else if (ether_addr_equal(eth_hdr(skb)->h_dest,
-                                    hsr_priv->dev->dev_addr)) {
-               skb->pkt_type = PACKET_HOST;
-               deliver_to_self = true;
-       }
-
-
-       rcu_read_lock(); /* node_db */
-       node = hsr_find_node(&hsr_priv->node_db, skb);
-
-       if (is_supervision_frame(hsr_priv, skb)) {
-               skb_pull(skb, sizeof(struct hsr_sup_tag));
-               node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx);
-               if (!node) {
-                       rcu_read_unlock(); /* node_db */
-                       kfree_skb(skb);
-                       hsr_priv->dev->stats.rx_dropped++;
-                       return NET_RX_DROP;
-               }
-               skb_push(skb, sizeof(struct hsr_sup_tag));
-               deliver_to_self = false;
-       }
-
-       if (!node) {
-               /* Source node unknown; this might be a HSR frame from
-                * another net (different multicast address). Ignore it.
-                */
-               rcu_read_unlock(); /* node_db */
-               kfree_skb(skb);
-               return NET_RX_SUCCESS;
-       }
-
-       /* Register ALL incoming frames as outgoing through the other interface.
-        * This allows us to register frames as incoming only if they are valid
-        * for the receiving interface, without using a specific counter for
-        * incoming frames.
-        */
-       dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
-       if (!dup_out)
-               hsr_register_frame_in(node, dev_in_idx);
-
-       /* Forward this frame? */
-       if (!dup_out && (skb->pkt_type != PACKET_HOST))
-               other_slave = get_other_slave(hsr_priv, dev);
-       else
-               other_slave = NULL;
-
-       if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
-               deliver_to_self = false;
-
-       rcu_read_unlock(); /* node_db */
-
-       if (!deliver_to_self && !other_slave) {
-               kfree_skb(skb);
-               /* Circulated frame; silently remove it. */
-               return NET_RX_SUCCESS;
-       }
-
-       skb_deliver = skb;
-       if (deliver_to_self && other_slave) {
-               /* skb_clone() is not enough since we will strip the hsr tag
-                * and do address substitution below
-                */
-               skb_deliver = pskb_copy(skb, GFP_ATOMIC);
-               if (!skb_deliver) {
-                       deliver_to_self = false;
-                       hsr_priv->dev->stats.rx_dropped++;
-               }
-       }
-
-       if (deliver_to_self) {
-               bool multicast_frame;
-
-               skb_deliver = hsr_pull_tag(skb_deliver);
-               if (!skb_deliver) {
-                       hsr_priv->dev->stats.rx_dropped++;
-                       goto forward;
-               }
-#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               /* Move everything in the header that is after the HSR tag,
-                * to work around alignment problems caused by the 6-byte HSR
-                * tag. In practice, this removes/overwrites the HSR tag in
-                * the header and restores a "standard" packet.
-                */
-               memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data,
-                       skb_headlen(skb_deliver));
-
-               /* Adjust skb members so they correspond with the move above.
-                * This cannot possibly underflow skb->data since hsr_pull_tag()
-                * above succeeded.
-                * At this point in the protocol stack, the transport and
-                * network headers have not been set yet, and we haven't touched
-                * the mac header nor the head. So we only need to adjust data
-                * and tail:
-                */
-               skb_deliver->data -= HSR_TAGLEN;
-               skb_deliver->tail -= HSR_TAGLEN;
-#endif
-               skb_deliver->dev = hsr_priv->dev;
-               hsr_addr_subst_source(hsr_priv, skb_deliver);
-               multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
-               ret = netif_rx(skb_deliver);
-               if (ret == NET_RX_DROP) {
-                       hsr_priv->dev->stats.rx_dropped++;
-               } else {
-                       hsr_priv->dev->stats.rx_packets++;
-                       hsr_priv->dev->stats.rx_bytes += skb->len;
-                       if (multicast_frame)
-                               hsr_priv->dev->stats.multicast++;
-               }
-       }
-
-forward:
-       if (other_slave) {
-               skb_push(skb, ETH_HLEN);
-               skb->dev = other_slave;
-               dev_queue_xmit(skb);
-       }
-
-       return NET_RX_SUCCESS;
-}
-
-
-static struct packet_type hsr_pt __read_mostly = {
-       .type = htons(ETH_P_PRP),
-       .func = hsr_rcv,
-};
-
 static struct notifier_block hsr_nb = {
        .notifier_call = hsr_netdev_notify,     /* Slave event notifications */
 };
@@ -439,18 +113,9 @@ static int __init hsr_init(void)
 {
        int res;
 
-       BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN);
-
-       dev_add_pack(&hsr_pt);
-
-       init_timer(&prune_timer);
-       prune_timer.function = prune_nodes_all;
-       prune_timer.data = 0;
-       prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
-       add_timer(&prune_timer);
+       BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
 
        register_netdevice_notifier(&hsr_nb);
-
        res = hsr_netlink_init();
 
        return res;
@@ -459,9 +124,7 @@ static int __init hsr_init(void)
 static void __exit hsr_exit(void)
 {
        unregister_netdevice_notifier(&hsr_nb);
-       del_timer_sync(&prune_timer);
        hsr_netlink_exit();
-       dev_remove_pack(&hsr_pt);
 }
 
 module_init(hsr_init);
index 56fe060c0ab1872330adda30daace83c429313ff..5a9c69962ded0284c62f6b0ab7767ceabecc6aa8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,11 +6,11 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
-#ifndef _HSR_PRIVATE_H
-#define _HSR_PRIVATE_H
+#ifndef __HSR_PRIVATE_H
+#define __HSR_PRIVATE_H
 
 #include <linux/netdevice.h>
 #include <linux/list.h>
@@ -29,6 +29,7 @@
  * each node differ before we notify of communication problem?
  */
 #define MAX_SLAVE_DIFF                  3000 /* ms */
+#define HSR_SEQNR_START                        (USHRT_MAX - 1024)
 
 
 /* How often shall we check for broken ring and remove node entries older than
  * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
  * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
  * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
  */
-#define HSR_TAGLEN     6
-
-/* Field names below as defined in the IEC:2010 standard for HSR. */
 struct hsr_tag {
        __be16          path_and_LSDU_size;
        __be16          sequence_nr;
        __be16          encap_proto;
 } __packed;
 
+#define HSR_HLEN       6
 
 /* The helper functions below assumes that 'path' occupies the 4 most
  * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
@@ -136,31 +137,47 @@ struct hsr_ethhdr_sp {
 } __packed;
 
 
-enum hsr_dev_idx {
-       HSR_DEV_NONE = -1,
-       HSR_DEV_SLAVE_A = 0,
-       HSR_DEV_SLAVE_B,
-       HSR_DEV_MASTER,
+enum hsr_port_type {
+       HSR_PT_NONE = 0,        /* Must be 0, used by framereg */
+       HSR_PT_SLAVE_A,
+       HSR_PT_SLAVE_B,
+       HSR_PT_INTERLINK,
+       HSR_PT_MASTER,
+       HSR_PT_PORTS,   /* This must be the last item in the enum */
+};
+
+struct hsr_port {
+       struct list_head        port_list;
+       struct net_device       *dev;
+       struct hsr_priv         *hsr;
+       enum hsr_port_type      type;
 };
-#define HSR_MAX_SLAVE  (HSR_DEV_SLAVE_B + 1)
-#define HSR_MAX_DEV    (HSR_DEV_MASTER + 1)
 
 struct hsr_priv {
-       struct list_head        hsr_list;       /* List of hsr devices */
        struct rcu_head         rcu_head;
-       struct net_device       *dev;
-       struct net_device       *slave[HSR_MAX_SLAVE];
-       struct list_head        node_db;        /* Other HSR nodes */
+       struct list_head        ports;
+       struct list_head        node_db;        /* Known HSR nodes */
        struct list_head        self_node_db;   /* MACs of slaves */
        struct timer_list       announce_timer; /* Supervision frame dispatch */
+       struct timer_list       prune_timer;
        int announce_count;
        u16 sequence_nr;
        spinlock_t seqnr_lock;                  /* locking for sequence_nr */
        unsigned char           sup_multicast_addr[ETH_ALEN];
 };
 
-void register_hsr_master(struct hsr_priv *hsr_priv);
-void unregister_hsr_master(struct hsr_priv *hsr_priv);
-bool is_hsr_slave(struct net_device *dev);
+#define hsr_for_each_port(hsr, port) \
+       list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+/* Caller must ensure skb is a valid HSR frame */
+static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
+{
+       struct hsr_ethhdr *hsr_ethhdr;
+
+       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+       return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
+}
 
-#endif /*  _HSR_PRIVATE_H */
+#endif /*  __HSR_PRIVATE_H */
index 01a5261ac7a5520230fb0d2e147ffd3cf0a930ec..a2c7e4c0ac1ed8929f14786a09965663333f9619 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * Routines for handling Netlink messages for HSR.
  */
@@ -37,13 +37,17 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        struct net_device *link[2];
        unsigned char multicast_spec;
 
+       if (!data) {
+               netdev_info(dev, "HSR: No slave devices specified\n");
+               return -EINVAL;
+       }
        if (!data[IFLA_HSR_SLAVE1]) {
-               netdev_info(dev, "IFLA_HSR_SLAVE1 missing!\n");
+               netdev_info(dev, "HSR: Slave1 device not specified\n");
                return -EINVAL;
        }
        link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
        if (!data[IFLA_HSR_SLAVE2]) {
-               netdev_info(dev, "IFLA_HSR_SLAVE2 missing!\n");
+               netdev_info(dev, "HSR: Slave2 device not specified\n");
                return -EINVAL;
        }
        link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));
@@ -63,21 +67,33 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
 
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
+       int res;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
 
-       if (hsr_priv->slave[0])
-               if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
-                       goto nla_put_failure;
+       res = 0;
 
-       if (hsr_priv->slave[1])
-               if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
-                       goto nla_put_failure;
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (port)
+               res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
+       rcu_read_unlock();
+       if (res)
+               goto nla_put_failure;
+
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+       if (port)
+               res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
+       rcu_read_unlock();
+       if (res)
+               goto nla_put_failure;
 
        if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
-                   hsr_priv->sup_multicast_addr) ||
-           nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+                   hsr->sup_multicast_addr) ||
+           nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
                goto nla_put_failure;
 
        return 0;
@@ -128,13 +144,13 @@ static const struct genl_multicast_group hsr_mcgrps[] = {
  * over one of the slave interfaces. This would indicate an open network ring
  * (i.e. a link has failed somewhere).
  */
-void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
-                     enum hsr_dev_idx dev_idx)
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+                     struct hsr_port *port)
 {
        struct sk_buff *skb;
        void *msg_head;
+       struct hsr_port *master;
        int res;
-       int ifindex;
 
        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb)
@@ -148,11 +164,7 @@ void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
        if (res < 0)
                goto nla_put_failure;
 
-       if (hsr_priv->slave[dev_idx])
-               ifindex = hsr_priv->slave[dev_idx]->ifindex;
-       else
-               ifindex = -1;
-       res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex);
+       res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
        if (res < 0)
                goto nla_put_failure;
 
@@ -165,16 +177,20 @@ nla_put_failure:
        kfree_skb(skb);
 
 fail:
-       netdev_warn(hsr_priv->dev, "Could not send HSR ring error message\n");
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_warn(master->dev, "Could not send HSR ring error message\n");
+       rcu_read_unlock();
 }
 
 /* This is called when we haven't heard from the node with MAC address addr for
  * some time (just before the node is removed from the node table/list).
  */
-void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN])
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
 {
        struct sk_buff *skb;
        void *msg_head;
+       struct hsr_port *master;
        int res;
 
        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -199,7 +215,10 @@ nla_put_failure:
        kfree_skb(skb);
 
 fail:
-       netdev_warn(hsr_priv->dev, "Could not send HSR node down\n");
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_warn(master->dev, "Could not send HSR node down\n");
+       rcu_read_unlock();
 }
 
 
@@ -220,7 +239,8 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        /* For sending */
        struct sk_buff *skb_out;
        void *msg_head;
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
        unsigned char hsr_node_addr_b[ETH_ALEN];
        int hsr_node_if1_age;
        u16 hsr_node_if1_seq;
@@ -267,8 +287,8 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        if (res < 0)
                goto nla_put_failure;
 
-       hsr_priv = netdev_priv(hsr_dev);
-       res = hsr_get_node_data(hsr_priv,
+       hsr = netdev_priv(hsr_dev);
+       res = hsr_get_node_data(hsr,
                        (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
                        hsr_node_addr_b,
                        &addr_b_ifindex,
@@ -301,9 +321,12 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
        if (res < 0)
                goto nla_put_failure;
-       if (hsr_priv->slave[0])
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (port)
                res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
-                                               hsr_priv->slave[0]->ifindex);
+                                 port->dev->ifindex);
+       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
@@ -313,9 +336,14 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
-       if (hsr_priv->slave[1])
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+       if (port)
                res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
-                                               hsr_priv->slave[1]->ifindex);
+                                 port->dev->ifindex);
+       rcu_read_unlock();
+       if (res < 0)
+               goto nla_put_failure;
 
        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
@@ -334,7 +362,7 @@ fail:
        return res;
 }
 
-/* Get a list of MacAddressA of all nodes known to this node (other than self).
+/* Get a list of MacAddressA of all nodes known to this node (including self).
  */
 static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 {
@@ -345,7 +373,7 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        /* For sending */
        struct sk_buff *skb_out;
        void *msg_head;
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
        void *pos;
        unsigned char addr[ETH_ALEN];
        int res;
@@ -385,17 +413,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        if (res < 0)
                goto nla_put_failure;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
 
        rcu_read_lock();
-       pos = hsr_get_next_node(hsr_priv, NULL, addr);
+       pos = hsr_get_next_node(hsr, NULL, addr);
        while (pos) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
                if (res < 0) {
                        rcu_read_unlock();
                        goto nla_put_failure;
                }
-               pos = hsr_get_next_node(hsr_priv, pos, addr);
+               pos = hsr_get_next_node(hsr, pos, addr);
        }
        rcu_read_unlock();
 
index d4579dcc3c7d50adde22a34c01008c46028283d8..3f6b95b5b6b8841b045493468ccce641ea55d258 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #ifndef __HSR_NETLINK_H
 #include <uapi/linux/hsr_netlink.h>
 
 struct hsr_priv;
+struct hsr_port;
 
 int __init hsr_netlink_init(void);
 void __exit hsr_netlink_exit(void);
 
-void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
-                     int dev_idx);
-void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN]);
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+                     struct hsr_port *port);
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]);
 void hsr_nl_framedrop(int dropcount, int dev_idx);
 void hsr_nl_linkdown(int dev_idx);
 
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
new file mode 100644 (file)
index 0000000..a348dcb
--- /dev/null
@@ -0,0 +1,196 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#include "hsr_slave.h"
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_forward.h"
+#include "hsr_framereg.h"
+
+
+static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct hsr_port *port;
+
+       if (!skb_mac_header_was_set(skb)) {
+               WARN_ONCE(1, "%s: skb invalid", __func__);
+               return RX_HANDLER_PASS;
+       }
+
+       rcu_read_lock(); /* hsr->node_db, hsr->ports */
+       port = hsr_port_get_rcu(skb->dev);
+
+       if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
+               /* Directly kill frames sent by ourselves */
+               kfree_skb(skb);
+               goto finish_consume;
+       }
+
+       if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
+               goto finish_pass;
+
+       skb_push(skb, ETH_HLEN);
+
+       hsr_forward_skb(skb, port);
+
+finish_consume:
+       rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+       return RX_HANDLER_CONSUMED;
+
+finish_pass:
+       rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+       return RX_HANDLER_PASS;
+}
+
+bool hsr_port_exists(const struct net_device *dev)
+{
+       return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
+}
+
+
+static int hsr_check_dev_ok(struct net_device *dev)
+{
+       /* Don't allow HSR on non-ethernet like devices */
+       if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
+           (dev->addr_len != ETH_ALEN)) {
+               netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+               return -EINVAL;
+       }
+
+       /* Don't allow enslaving hsr devices */
+       if (is_hsr_master(dev)) {
+               netdev_info(dev, "Cannot create trees of HSR devices.\n");
+               return -EINVAL;
+       }
+
+       if (hsr_port_exists(dev)) {
+               netdev_info(dev, "This device is already a HSR slave.\n");
+               return -EINVAL;
+       }
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+               return -EINVAL;
+       }
+
+       if (dev->priv_flags & IFF_DONT_BRIDGE) {
+               netdev_info(dev, "This device does not support bridging.\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* HSR over bonded devices has not been tested, but I'm not sure it
+        * won't work...
+        */
+
+       return 0;
+}
+
+
+/* Setup device to be added to the HSR bridge. */
+static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
+{
+       int res;
+
+       dev_hold(dev);
+       res = dev_set_promiscuity(dev, 1);
+       if (res)
+               goto fail_promiscuity;
+
+       /* FIXME:
+        * What does net device "adjacency" mean? Should we do
+        * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ?
+        */
+
+       res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
+       if (res)
+               goto fail_rx_handler;
+       dev_disable_lro(dev);
+
+       return 0;
+
+fail_rx_handler:
+       dev_set_promiscuity(dev, -1);
+fail_promiscuity:
+       dev_put(dev);
+
+       return res;
+}
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+                enum hsr_port_type type)
+{
+       struct hsr_port *port, *master;
+       int res;
+
+       if (type != HSR_PT_MASTER) {
+               res = hsr_check_dev_ok(dev);
+               if (res)
+                       return res;
+       }
+
+       port = hsr_port_get_hsr(hsr, type);
+       if (port != NULL)
+               return -EBUSY;  /* This port already exists */
+
+       port = kzalloc(sizeof(*port), GFP_KERNEL);
+       if (port == NULL)
+               return -ENOMEM;
+
+       if (type != HSR_PT_MASTER) {
+               res = hsr_portdev_setup(dev, port);
+               if (res)
+                       goto fail_dev_setup;
+       }
+
+       port->hsr = hsr;
+       port->dev = dev;
+       port->type = type;
+
+       list_add_tail_rcu(&port->port_list, &hsr->ports);
+       synchronize_rcu();
+
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_update_features(master->dev);
+       dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+
+       return 0;
+
+fail_dev_setup:
+       kfree(port);
+       return res;
+}
+
+void hsr_del_port(struct hsr_port *port)
+{
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
+
+       hsr = port->hsr;
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       list_del_rcu(&port->port_list);
+
+       if (port != master) {
+               netdev_update_features(master->dev);
+               dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               netdev_rx_handler_unregister(port->dev);
+               dev_set_promiscuity(port->dev, -1);
+       }
+
+       /* FIXME?
+        * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
+        */
+
+       synchronize_rcu();
+       dev_put(port->dev);
+}
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
new file mode 100644 (file)
index 0000000..3ccfbf7
--- /dev/null
@@ -0,0 +1,38 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#ifndef __HSR_SLAVE_H
+#define __HSR_SLAVE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include "hsr_main.h"
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+                enum hsr_port_type pt);
+void hsr_del_port(struct hsr_port *port);
+bool hsr_port_exists(const struct net_device *dev);
+
+static inline struct hsr_port *hsr_port_get_rtnl(const struct net_device *dev)
+{
+       ASSERT_RTNL();
+       return hsr_port_exists(dev) ?
+                               rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
+static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev)
+{
+       return hsr_port_exists(dev) ?
+                               rcu_dereference(dev->rx_handler_data) : NULL;
+}
+
+#endif /* __HSR_SLAVE_H */
index 211b5686d719679242d6a7b5e0cd65547ae37cd3..a1b7117a9600f3954851e02c4df3894c908aa658 100644 (file)
@@ -3,8 +3,7 @@
  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
-/*
- * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #include <net/ipv6.h>
 #include <net/af_ieee802154.h>
 
-/*
- * Uncompress address function for source and
+/* Uncompress address function for source and
  * destination address(non-multicast).
  *
  * address_mode is sam value or dam value.
  */
 static int uncompress_addr(struct sk_buff *skb,
-                               struct in6_addr *ipaddr, const u8 address_mode,
-                               const u8 *lladdr, const u8 addr_type,
-                               const u8 addr_len)
+                          struct in6_addr *ipaddr, const u8 address_mode,
+                          const u8 *lladdr, const u8 addr_type,
+                          const u8 addr_len)
 {
        bool fail;
 
@@ -140,13 +138,12 @@ static int uncompress_addr(struct sk_buff *skb,
        return 0;
 }
 
-/*
- * Uncompress address function for source context
+/* Uncompress address function for source context
  * based address(non-multicast).
  */
 static int uncompress_context_based_src_addr(struct sk_buff *skb,
-                                               struct in6_addr *ipaddr,
-                                               const u8 sam)
+                                            struct in6_addr *ipaddr,
+                                            const u8 sam)
 {
        switch (sam) {
        case LOWPAN_IPHC_ADDR_00:
@@ -175,13 +172,13 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb,
 }
 
 static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
-               struct net_device *dev, skb_delivery_cb deliver_skb)
+                      struct net_device *dev, skb_delivery_cb deliver_skb)
 {
        struct sk_buff *new;
        int stat;
 
-       new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
-                                                               GFP_ATOMIC);
+       new = skb_copy_expand(skb, sizeof(struct ipv6hdr),
+                             skb_tailroom(skb), GFP_ATOMIC);
        kfree_skb(skb);
 
        if (!new)
@@ -196,7 +193,7 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
        new->dev = dev;
 
        raw_dump_table(__func__, "raw skb data dump before receiving",
-                       new->data, new->len);
+                      new->data, new->len);
 
        stat = deliver_skb(new, dev);
 
@@ -210,8 +207,8 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
  */
 static int
 lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
-               struct in6_addr *ipaddr,
-               const u8 dam)
+                                 struct in6_addr *ipaddr,
+                                 const u8 dam)
 {
        bool fail;
 
@@ -300,7 +297,6 @@ uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
                default:
                        pr_debug("ERROR: unknown UDP format\n");
                        goto err;
-                       break;
                }
 
                pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
@@ -314,8 +310,7 @@ uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
                        fail |= lowpan_fetch_skb(skb, &uh->check, 2);
                }
 
-               /*
-                * UDP lenght needs to be infered from the lower layers
+               /* UDP length needs to be inferred from the lower layers
                 * here, we obtain the hint from the remaining size of the
                 * frame
                 */
@@ -338,16 +333,17 @@ err:
 static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
 
 int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
-               const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
-               const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
-               u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
+                       const u8 *saddr, const u8 saddr_type,
+                       const u8 saddr_len, const u8 *daddr,
+                       const u8 daddr_type, const u8 daddr_len,
+                       u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
 {
        struct ipv6hdr hdr = {};
        u8 tmp, num_context = 0;
        int err;
 
        raw_dump_table(__func__, "raw skb data dump uncompressed",
-                               skb->data, skb->len);
+                      skb->data, skb->len);
 
        /* another if the CID flag is set */
        if (iphc1 & LOWPAN_IPHC_CID) {
@@ -360,8 +356,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
 
        /* Traffic Class and Flow Label */
        switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-       /*
-        * Traffic Class and FLow Label carried in-line
+       /* Traffic Class and Flow Label carried in-line
         * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
         */
        case 0: /* 00b */
@@ -374,8 +369,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
                                        (hdr.flow_lbl[0] & 0x0f);
                break;
-       /*
-        * Traffic class carried in-line
+       /* Traffic class carried in-line
         * ECN + DSCP (1 byte), Flow Label is elided
         */
        case 2: /* 10b */
@@ -385,8 +379,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.priority = ((tmp >> 2) & 0x0f);
                hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
                break;
-       /*
-        * Flow Label carried in-line
+       /* Flow Label carried in-line
         * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
         */
        case 1: /* 01b */
@@ -415,9 +408,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* Hop Limit */
-       if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
+       if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
                hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
-       else {
+       } else {
                if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
                        goto drop;
        }
@@ -429,12 +422,12 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                /* Source address context based uncompression */
                pr_debug("SAC bit is set. Handle context based source address.\n");
                err = uncompress_context_based_src_addr(
-                               skb, &hdr.saddr, tmp);
+                                               skb, &hdr.saddr, tmp);
        } else {
                /* Source address uncompression */
                pr_debug("source address stateless compression\n");
                err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
-                                       saddr_type, saddr_len);
+                                     saddr_type, saddr_len);
        }
 
        /* Check on error of previous branch */
@@ -457,9 +450,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                }
        } else {
                err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
-                                       daddr_type, daddr_len);
+                                     daddr_type, daddr_len);
                pr_debug("dest: stateless compression mode %d dest %pI6c\n",
-                       tmp, &hdr.daddr);
+                        tmp, &hdr.daddr);
                if (err)
                        goto drop;
        }
@@ -468,11 +461,11 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
        if (iphc0 & LOWPAN_IPHC_NH_C) {
                struct udphdr uh;
                struct sk_buff *new;
+
                if (uncompress_udp_header(skb, &uh))
                        goto drop;
 
-               /*
-                * replace the compressed UDP head by the uncompressed UDP
+               /* replace the compressed UDP head by the uncompressed UDP
                 * header
                 */
                new = skb_copy_expand(skb, sizeof(struct udphdr),
@@ -489,7 +482,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
 
                raw_dump_table(__func__, "raw UDP header dump",
-                                     (u8 *)&uh, sizeof(uh));
+                              (u8 *)&uh, sizeof(uh));
 
                hdr.nexthdr = UIP_PROTO_UDP;
        }
@@ -504,8 +497,8 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
                hdr.hop_limit, &hdr.daddr);
 
-       raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
-                                                       sizeof(hdr));
+       raw_dump_table(__func__, "raw header dump",
+                      (u8 *)&hdr, sizeof(hdr));
 
        return skb_deliver(skb, &hdr, dev, deliver_skb);
 
@@ -516,8 +509,8 @@ drop:
 EXPORT_SYMBOL_GPL(lowpan_process_data);
 
 static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
-                               const struct in6_addr *ipaddr,
-                               const unsigned char *lladdr)
+                                 const struct in6_addr *ipaddr,
+                                 const unsigned char *lladdr)
 {
        u8 val = 0;
 
@@ -530,14 +523,14 @@ static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
                *hc06_ptr += 2;
                val = 2; /* 16-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
-                       *hc06_ptr - 2, 2);
+                               *hc06_ptr - 2, 2);
        } else {
                /* do not compress IID => xxxx::IID */
                memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
                *hc06_ptr += 8;
                val = 1; /* 64-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
-                       *hc06_ptr - 8, 8);
+                               *hc06_ptr - 8, 8);
        }
 
        return rol8(val, shift);
@@ -601,8 +594,8 @@ static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
 }
 
 int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-                       unsigned short type, const void *_daddr,
-                       const void *_saddr, unsigned int len)
+                          unsigned short type, const void *_daddr,
+                          const void *_saddr, unsigned int len)
 {
        u8 tmp, iphc0, iphc1, *hc06_ptr;
        struct ipv6hdr *hdr;
@@ -616,14 +609,13 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 
        pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength  = %d\n"
                 "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest    = %pI6c\n",
-               hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
-               hdr->hop_limit, &hdr->daddr);
+                hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
+                hdr->hop_limit, &hdr->daddr);
 
        raw_dump_table(__func__, "raw skb network header dump",
-               skb_network_header(skb), sizeof(struct ipv6hdr));
+                      skb_network_header(skb), sizeof(struct ipv6hdr));
 
-       /*
-        * As we copy some bit-length fields, in the IPHC encoding bytes,
+       /* As we copy some bit-length fields, in the IPHC encoding bytes,
         * we sometimes use |=
         * If the field is 0, and the current bit value in memory is 1,
         * this does not work. We therefore reset the IPHC encoding here
@@ -639,11 +631,10 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                        (unsigned char *)_daddr, IEEE802154_ADDR_LEN);
 
        raw_dump_table(__func__,
-                       "sending raw skb network uncompressed packet",
-                       skb->data, skb->len);
+                      "sending raw skb network uncompressed packet",
+                      skb->data, skb->len);
 
-       /*
-        * Traffic class, flow label
+       /* Traffic class, flow label
         * If flow label is 0, compress it. If traffic class is 0, compress it
         * We have to process both in the same time as the offset of traffic
         * class depends on the presence of version and flow label
@@ -654,11 +645,11 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
 
        if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
-            (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
+           (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
                /* flow label can be compressed */
                iphc0 |= LOWPAN_IPHC_FL_C;
                if ((hdr->priority == 0) &&
-                  ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
                        /* compress (elide) all */
                        iphc0 |= LOWPAN_IPHC_TC_C;
                } else {
@@ -669,7 +660,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        } else {
                /* Flow label cannot be compressed */
                if ((hdr->priority == 0) &&
-                  ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
                        /* compress only traffic class */
                        iphc0 |= LOWPAN_IPHC_TC_C;
                        *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
@@ -695,8 +686,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                hc06_ptr += 1;
        }
 
-       /*
-        * Hop limit
+       /* Hop limit
         * if 1:   compress, encoding is 01
         * if 64:  compress, encoding is 10
         * if 255: compress, encoding is 11
@@ -793,7 +783,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len);
 
        raw_dump_table(__func__, "raw skb data dump compressed",
-                               skb->data, skb->len);
+                      skb->data, skb->len);
        return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
index fe6bd7a7108169138faf198fefcab7f3217e9c55..016b77ee88f0e2f1394c0456f67e9e01ec190d9f 100644 (file)
@@ -80,14 +80,14 @@ lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 static inline void lowpan_address_flip(u8 *src, u8 *dest)
 {
        int i;
+
        for (i = 0; i < IEEE802154_ADDR_LEN; i++)
                (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
 }
 
-static int lowpan_header_create(struct sk_buff *skb,
-                          struct net_device *dev,
-                          unsigned short type, const void *_daddr,
-                          const void *_saddr, unsigned int len)
+static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+                               unsigned short type, const void *_daddr,
+                               const void *_saddr, unsigned int len)
 {
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
@@ -144,7 +144,7 @@ static int lowpan_header_create(struct sk_buff *skb,
 }
 
 static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                       struct net_device *dev)
+                                     struct net_device *dev)
 {
        struct lowpan_dev_record *entry;
        struct sk_buff *skb_cp;
@@ -368,24 +368,28 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
 }
 
 static __le16 lowpan_get_pan_id(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
 }
 
 static __le16 lowpan_get_short_addr(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
 }
 
 static u8 lowpan_get_dsn(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
 }
 
@@ -454,7 +458,7 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 }
 
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                     struct packet_type *pt, struct net_device *orig_dev)
 {
        struct ieee802154_hdr hdr;
        int ret;
index 351d9a94ec2faa429612d6b7da26b09b63e49a25..29e0de63001b68930f11eab46e6d444a8ee69b96 100644 (file)
@@ -40,9 +40,7 @@
 
 #include "af802154.h"
 
-/*
- * Utility function for families
- */
+/* Utility function for families */
 struct net_device*
 ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
 {
@@ -87,8 +85,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
                rtnl_unlock();
                break;
        default:
-               pr_warning("Unsupported ieee802154 address type: %d\n",
-                               addr->mode);
+               pr_warn("Unsupported ieee802154 address type: %d\n",
+                       addr->mode);
                break;
        }
 
@@ -106,7 +104,7 @@ static int ieee802154_sock_release(struct socket *sock)
        return 0;
 }
 static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-               struct msghdr *msg, size_t len)
+                                  struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
 
@@ -114,7 +112,7 @@ static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 }
 
 static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
-               int addr_len)
+                               int addr_len)
 {
        struct sock *sk = sock->sk;
 
@@ -125,7 +123,7 @@ static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
-                       int addr_len, int flags)
+                                  int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
 
@@ -139,7 +137,7 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
-               unsigned int cmd)
+                               unsigned int cmd)
 {
        struct ifreq ifr;
        int ret = -ENOIOCTLCMD;
@@ -167,7 +165,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
 }
 
 static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
-               unsigned long arg)
+                                unsigned long arg)
 {
        struct sock *sk = sock->sk;
 
@@ -238,8 +236,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
 };
 
 
-/*
- * Create a socket. Initialise the socket, blank the addresses
+/* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
 static int ieee802154_create(struct net *net, struct socket *sock,
@@ -301,13 +298,14 @@ static const struct net_proto_family ieee802154_family_ops = {
 };
 
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                         struct packet_type *pt, struct net_device *orig_dev)
 {
        if (!netif_running(dev))
                goto drop;
        pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
 #ifdef DEBUG
-       print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+       print_hex_dump_bytes("ieee802154_rcv ",
+                            DUMP_PREFIX_NONE, skb->data, skb->len);
 #endif
 
        if (!net_eq(dev_net(dev), &init_net))
index 4f0ed8780194502465f0d5b60383bf6794bfadf0..ef2ad8aaef1361f2e061519dd773cd0f03dd4b30 100644 (file)
@@ -149,8 +149,7 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
-                       /*
-                        * We will only return the amount
+                       /* We will only return the amount
                         * of this packet since that is all
                         * that will be read.
                         */
@@ -161,12 +160,13 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
        }
 
        }
+
        return -ENOIOCTLCMD;
 }
 
 /* FIXME: autobind */
 static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int len)
+                        int len)
 {
        struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
        struct dgram_sock *ro = dgram_sk(sk);
@@ -205,7 +205,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
 }
 
 static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t size)
+                        struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -248,8 +248,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT,
-                       &err);
+                                 msg->msg_flags & MSG_DONTWAIT,
+                                 &err);
        if (!skb)
                goto out_dev;
 
@@ -262,7 +262,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        cb->ackreq = ro->want_ack;
 
        if (msg->msg_name) {
-               DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
+                                daddr, msg->msg_name);
 
                ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
        } else {
@@ -304,8 +305,8 @@ out:
 }
 
 static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t len, int noblock, int flags,
-               int *addr_len)
+                        struct msghdr *msg, size_t len, int noblock,
+                        int flags, int *addr_len)
 {
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -398,6 +399,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                                          dgram_sk(sk))) {
                        if (prev) {
                                struct sk_buff *clone;
+
                                clone = skb_clone(skb, GFP_ATOMIC);
                                if (clone)
                                        dgram_rcv_skb(prev, clone);
@@ -407,9 +409,9 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                }
        }
 
-       if (prev)
+       if (prev) {
                dgram_rcv_skb(prev, skb);
-       else {
+       } else {
                kfree_skb(skb);
                ret = NET_RX_DROP;
        }
@@ -419,7 +421,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int dgram_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                           char __user *optval, int __user *optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
@@ -463,7 +465,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
        struct net *net = sock_net(sk);
index 8b83a231299e46a0668b3fe329803fa1a6154791..5d352f86979e40b191e08d4bb1d63100560fd1b6 100644 (file)
@@ -43,7 +43,7 @@ struct genl_info;
 struct sk_buff *ieee802154_nl_create(int flags, u8 req);
 int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group);
 struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
-               int flags, u8 req);
+                                       int flags, u8 req);
 int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
 
 extern struct genl_family nl802154_family;
index 26efcf4fd2ff72079a678ef3a4dbd0ae887848e1..9222966f5e6d7438183825ffe306a3c4ab502b10 100644 (file)
@@ -52,7 +52,7 @@ struct sk_buff *ieee802154_nl_create(int flags, u8 req)
 
        spin_lock_irqsave(&ieee802154_seq_lock, f);
        hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
-                       &nl802154_family, flags, req);
+                         &nl802154_family, flags, req);
        spin_unlock_irqrestore(&ieee802154_seq_lock, f);
        if (!hdr) {
                nlmsg_free(msg);
@@ -86,7 +86,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
                return NULL;
 
        hdr = genlmsg_put_reply(msg, info,
-                       &nl802154_family, flags, req);
+                               &nl802154_family, flags, req);
        if (!hdr) {
                nlmsg_free(msg);
                return NULL;
index a3281b8bfd5bf1fa24bd05a351b476031776797f..c6bfe22bfa5ebedff765e6dcd4bf7bebd26d0b49 100644 (file)
@@ -60,7 +60,8 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla)
 }
 
 int ieee802154_nl_assoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 cap)
+                             struct ieee802154_addr *addr,
+                             u8 cap)
 {
        struct sk_buff *msg;
 
@@ -93,7 +94,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
 
 int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr,
-               u8 status)
+                               u8 status)
 {
        struct sk_buff *msg;
 
@@ -119,7 +120,8 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
 
 int ieee802154_nl_disassoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 reason)
+                                struct ieee802154_addr *addr,
+                                u8 reason)
 {
        struct sk_buff *msg;
 
@@ -205,8 +207,9 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
 
 int ieee802154_nl_scan_confirm(struct net_device *dev,
-               u8 status, u8 scan_type, u32 unscanned, u8 page,
-               u8 *edl/* , struct list_head *pan_desc_list */)
+                              u8 status, u8 scan_type,
+                              u32 unscanned, u8 page,
+                              u8 *edl/* , struct list_head *pan_desc_list */)
 {
        struct sk_buff *msg;
 
@@ -260,7 +263,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
 static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct net_device *dev)
+                                   u32 seq, int flags, struct net_device *dev)
 {
        void *hdr;
        struct wpan_phy *phy;
@@ -270,7 +273,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        pr_debug("%s\n", __func__);
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_IFACE);
+                         IEEE802154_LIST_IFACE);
        if (!hdr)
                goto out;
 
@@ -330,14 +333,16 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
 
        if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
                char name[IFNAMSIZ + 1];
+
                nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
-                               sizeof(name));
+                           sizeof(name));
                dev = dev_get_by_name(&init_net, name);
-       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
+       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) {
                dev = dev_get_by_index(&init_net,
                        nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
-       else
+       } else {
                return NULL;
+       }
 
        if (!dev)
                return NULL;
@@ -435,7 +440,7 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
        int ret = -EOPNOTSUPP;
 
        if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
-               !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
+           !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
            !info->attrs[IEEE802154_ATTR_REASON])
                return -EINVAL;
 
@@ -464,8 +469,7 @@ out:
        return ret;
 }
 
-/*
- * PANid, channel, beacon_order = 15, superframe_order = 15,
+/* PANid, channel, beacon_order = 15, superframe_order = 15,
  * PAN_coordinator, battery_life_extension = 0,
  * coord_realignment = 0, security_enable = 0
 */
@@ -559,8 +563,8 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
                page = 0;
 
 
-       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
-                       duration);
+       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
+                                                page, duration);
 
 out:
        dev_put(dev);
@@ -570,7 +574,8 @@ out:
 int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct net_device *dev = NULL;
        int rc = -ENOBUFS;
@@ -586,7 +591,7 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
-                       0, dev);
+                                     0, dev);
        if (rc < 0)
                goto out_free;
 
@@ -598,7 +603,6 @@ out_free:
 out_dev:
        dev_put(dev);
        return rc;
-
 }
 
 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -616,7 +620,8 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
                        goto cont;
 
                if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
-                       cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
+                                            cb->nlh->nlmsg_seq,
+                                            NLM_F_MULTI, dev) < 0)
                        break;
 cont:
                idx++;
@@ -765,6 +770,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        case IEEE802154_SCF_KEY_SHORT_INDEX:
        {
                u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+
                desc->short_source = cpu_to_le32(source);
                break;
        }
@@ -842,7 +848,7 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
-               IEEE802154_LLSEC_GETPARAMS);
+                         IEEE802154_LLSEC_GETPARAMS);
        if (!hdr)
                goto out_free;
 
@@ -946,7 +952,7 @@ struct llsec_dump_data {
 
 static int
 ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
-                           int (*step)(struct llsec_dump_data*))
+                           int (*step)(struct llsec_dump_data *))
 {
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
index 89b265aea151eaf7f301c28f391d064b7d22edfc..972baf83411af7c64a6f11826ff7b1ab61b43ec4 100644 (file)
@@ -36,7 +36,7 @@
 #include "ieee802154.h"
 
 static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct wpan_phy *phy)
+                                 u32 seq, int flags, struct wpan_phy *phy)
 {
        void *hdr;
        int i, pages = 0;
@@ -48,7 +48,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
                return -EMSGSIZE;
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_PHY);
+                         IEEE802154_LIST_PHY);
        if (!hdr)
                goto out;
 
@@ -80,7 +80,8 @@ out:
 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct wpan_phy *phy;
        const char *name;
@@ -105,7 +106,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
-                       0, phy);
+                                   0, phy);
        if (rc < 0)
                goto out_free;
 
@@ -117,7 +118,6 @@ out_free:
 out_dev:
        wpan_phy_put(phy);
        return rc;
-
 }
 
 struct dump_phy_data {
@@ -137,10 +137,10 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
                return 0;
 
        rc = ieee802154_nl_fill_phy(data->skb,
-                       NETLINK_CB(data->cb->skb).portid,
-                       data->cb->nlh->nlmsg_seq,
-                       NLM_F_MULTI,
-                       phy);
+                                   NETLINK_CB(data->cb->skb).portid,
+                                   data->cb->nlh->nlmsg_seq,
+                                   NLM_F_MULTI,
+                                   phy);
 
        if (rc < 0) {
                data->idx--;
@@ -238,10 +238,9 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
                addr.sa_family = ARPHRD_IEEE802154;
                nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
-                               IEEE802154_ADDR_LEN);
+                          IEEE802154_ADDR_LEN);
 
-               /*
-                * strangely enough, some callbacks (inetdev_event) from
+               /* strangely enough, some callbacks (inetdev_event) from
                 * dev_set_mac_address require RTNL_LOCK
                 */
                rtnl_lock();
index 74d54fae33d74a58ca2628f7547250906bea663e..9d1f64806f02127808986a84b36fae92e33df4a2 100644 (file)
@@ -96,7 +96,7 @@ out:
 }
 
 static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int addr_len)
+                      int addr_len)
 {
        return -ENOTSUPP;
 }
@@ -106,8 +106,8 @@ static int raw_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t size)
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
+                      struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -145,7 +145,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
+                                 msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto out_dev;
 
@@ -235,7 +235,6 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
                bh_lock_sock(sk);
                if (!sk->sk_bound_dev_if ||
                    sk->sk_bound_dev_if == dev->ifindex) {
-
                        struct sk_buff *clone;
 
                        clone = skb_clone(skb, GFP_ATOMIC);
@@ -248,13 +247,13 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int raw_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                         char __user *optval, int __user *optlen)
 {
        return -EOPNOTSUPP;
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
@@ -274,4 +273,3 @@ struct proto ieee802154_raw_prot = {
        .getsockopt     = raw_getsockopt,
        .setsockopt     = raw_setsockopt,
 };
-
index 6f1428c4870b11e9bab087c54d414079514caf8a..b85bd3f7048e7bdb7e44d06c0b49b4d2a22724f5 100644 (file)
@@ -378,6 +378,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
                int ret;
+
                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);
index 8d6f6704da84e8b95745cedb7040230cb5b96cb1..4955e0fe5883ae705edb822b62aab36c93e7e646 100644 (file)
@@ -48,7 +48,8 @@ MASTER_SHOW(transmit_power, "%d +- 1 dB");
 MASTER_SHOW(cca_mode, "%d");
 
 static ssize_t channels_supported_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
        int ret;
@@ -57,7 +58,7 @@ static ssize_t channels_supported_show(struct device *dev,
        mutex_lock(&phy->pib_lock);
        for (i = 0; i < 32; i++) {
                ret = snprintf(buf + len, PAGE_SIZE - len,
-                               "%#09x\n", phy->channels_supported[i]);
+                              "%#09x\n", phy->channels_supported[i]);
                if (ret < 0)
                        break;
                len += ret;
@@ -80,6 +81,7 @@ ATTRIBUTE_GROUPS(pmib);
 static void wpan_phy_release(struct device *d)
 {
        struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
+
        kfree(phy);
 }
 
@@ -121,11 +123,12 @@ static int wpan_phy_iter(struct device *dev, void *_data)
 {
        struct wpan_phy_iter_data *wpid = _data;
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+
        return wpid->fn(phy, wpid->data);
 }
 
 int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
-               void *data)
+                     void *data)
 {
        struct wpan_phy_iter_data wpid = {
                .fn = fn,
@@ -197,6 +200,7 @@ EXPORT_SYMBOL(wpan_phy_free);
 static int __init wpan_phy_class_init(void)
 {
        int rc;
+
        rc = class_register(&wpan_phy_class);
        if (rc)
                goto err;
index 05c57f0fcabef4b61795bf13b8712cd5ec91374d..dbc10d84161fc59047ee01feeb9f860227c3754a 100644 (file)
@@ -307,6 +307,10 @@ config NET_IPVTI
          the notion of a secure tunnel for IPSEC and then use routing protocol
          on top.
 
+config NET_UDP_TUNNEL
+       tristate
+       default n
+
 config INET_AH
        tristate "IP: AH transformation"
        select XFRM_ALGO
index f032688d20d308412694cbc7ef29567a86264b4b..8ee1cd4053ee742d723e1cca7c3cdee7c3527b5f 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_IPIP) += ipip.o
 gre-y := gre_demux.o
 obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
 obj-$(CONFIG_NET_IPGRE) += ip_gre.o
+obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o
 obj-$(CONFIG_NET_IPVTI) += ip_vti.o
 obj-$(CONFIG_SYN_COOKIES) += syncookies.o
 obj-$(CONFIG_INET_AH) += ah4.o
index a3095fdefbed98ed4e320ac6c44ea3e18241d1a4..90c0e8386116177f4bbf412f2175aec93c64870c 100644 (file)
@@ -76,6 +76,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
+       inet_set_txhash(sk);
        inet->inet_id = jiffies;
 
        sk_dst_set(sk, &rt->dst);
index 8d3b6b0e98574b5507f05972cf7fbac8ba32c465..b16556836d66a63981826dd3625419f3173e24ce 100644 (file)
@@ -962,10 +962,6 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
-                               else
-                                       /* only the initial fragment is
-                                          time stamped */
-                                       cork->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;
@@ -976,7 +972,10 @@ alloc_new_skb:
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
+
+                       /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
+                       cork->tx_flags = 0;
 
                        /*
                         *      Find where to start putting bytes.
index 6f9de61dce5f9585625443af1d372ddb931adf03..dd8c8c76579986babb0f9f03f4f224e8e99d1778 100644 (file)
@@ -305,7 +305,7 @@ static struct net_device *__ip_tunnel_create(struct net *net,
        }
 
        ASSERT_RTNL();
-       dev = alloc_netdev(ops->priv_size, name, ops->setup);
+       dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
index b3e86ea7b71b7e480ce64d243e3c1951484a7cbb..5bbef4fdcb439f7c3445278dc580b3f0aaacf907 100644 (file)
@@ -143,8 +143,6 @@ __be32 ic_servaddr = NONE;  /* Boot server IP address */
 __be32 root_server_addr = NONE;        /* Address of NFS server */
 u8 root_server_path[256] = { 0, };     /* Path to mount as root */
 
-__be32 ic_dev_xid;             /* Device under configuration */
-
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -654,6 +652,7 @@ static struct packet_type bootp_packet_type __initdata = {
        .func = ic_bootp_recv,
 };
 
+static __be32 ic_dev_xid;              /* Device under configuration */
 
 /*
  *  Initialize DHCP/BOOTP extension fields in the request.
@@ -1218,10 +1217,10 @@ static int __init ic_dynamic(void)
        get_random_bytes(&timeout, sizeof(timeout));
        timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
        for (;;) {
+#ifdef IPCONFIG_BOOTP
                /* Track the device we are configuring */
                ic_dev_xid = d->xid;
 
-#ifdef IPCONFIG_BOOTP
                if (do_bootp && (d->able & IC_BOOTP))
                        ic_bootp_send_if(d, jiffies - start_jiffies);
 #endif
index 65bcaa7890436df5ad2ee16f4465af51e141357b..c8034587859d3ebdda204711a7939abe8f66a22d 100644 (file)
@@ -500,7 +500,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
        else
                sprintf(name, "pimreg%u", mrt->id);
 
-       dev = alloc_netdev(0, name, reg_vif_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 
        if (dev == NULL)
                return NULL;
index a26ce035e3fad076a1d76fae0c377771300c7a04..fb173126f03dfb19ef5a215693a1985cc00450a2 100644 (file)
@@ -36,6 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
+config NF_LOG_ARP
+       tristate "ARP packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
+config NF_LOG_IPV4
+       tristate "IPv4 packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
 config NF_TABLES_IPV4
        depends on NF_TABLES
        tristate "IPv4 nf_tables support"
@@ -159,25 +169,6 @@ config IP_NF_TARGET_SYNPROXY
 
          To compile it as a module, choose M here. If unsure, say N.
 
-config IP_NF_TARGET_ULOG
-       tristate "ULOG target support (obsolete)"
-       default m if NETFILTER_ADVANCED=n
-       ---help---
-
-         This option enables the old IPv4-only "ipt_ULOG" implementation
-         which has been obsoleted by the new "nfnetlink_log" code (see
-         CONFIG_NETFILTER_NETLINK_LOG).
-
-         This option adds a `ULOG' target, which allows you to create rules in
-         any iptables table. The packet is passed to a userspace logging
-         daemon using netlink multicast sockets; unlike the LOG target
-         which can only be viewed through syslog.
-
-         The appropriate userspace logging daemon (ulogd) may be obtained from
-         <http://www.netfilter.org/projects/ulogd/index.html>
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 # NAT + specific targets: nf_conntrack
 config NF_NAT_IPV4
        tristate "IPv4 NAT"
index 90b82405331e1736c8bb4f0d4cacfd3c8fd4e783..245db9df3337a0d5b4eb873d592434ce3904ff17 100644 (file)
@@ -19,6 +19,10 @@ obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
 # defrag
 obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
 
+# logging
+obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o
+obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o
+
 # NAT helpers (nf_conntrack)
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
deleted file mode 100644 (file)
index 9cb993c..0000000
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * netfilter module for userspace packet logging daemons
- *
- * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
- * (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2005-2007 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This module accepts two parameters:
- *
- * nlbufsiz:
- *   The parameter specifies how big the buffer for each netlink multicast
- * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
- * get accumulated in the kernel until they are sent to userspace. It is
- * NOT possible to allocate more than 128kB, and it is strongly discouraged,
- * because atomically allocating 128kB inside the network rx softirq is not
- * reliable. Please also keep in mind that this buffer size is allocated for
- * each nlgroup you are using, so the total kernel memory usage increases
- * by that factor.
- *
- * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
- * nlbufsiz is used with alloc_skb, which adds another
- * sizeof(struct skb_shared_info).  Use NLMSG_GOODSIZE instead.
- *
- * flushtimeout:
- *   Specify, after how many hundredths of a second the queue should be
- *   flushed even if it is not full yet.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/socket.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <net/netlink.h>
-#include <linux/netdevice.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ipt_ULOG.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netns/generic.h>
-#include <net/sock.h>
-#include <linux/bitops.h>
-#include <asm/unaligned.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Xtables: packet logging to netlink using ULOG");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
-
-#define ULOG_NL_EVENT          111             /* Harald's favorite number */
-#define ULOG_MAXNLGROUPS       32              /* numer of nlgroups */
-
-static unsigned int nlbufsiz = NLMSG_GOODSIZE;
-module_param(nlbufsiz, uint, 0400);
-MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
-
-static unsigned int flushtimeout = 10;
-module_param(flushtimeout, uint, 0600);
-MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
-
-static bool nflog = true;
-module_param(nflog, bool, 0400);
-MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
-
-/* global data structures */
-
-typedef struct {
-       unsigned int qlen;              /* number of nlmsgs' in the skb */
-       struct nlmsghdr *lastnlh;       /* netlink header of last msg in skb */
-       struct sk_buff *skb;            /* the pre-allocated skb */
-       struct timer_list timer;        /* the timer function */
-} ulog_buff_t;
-
-static int ulog_net_id __read_mostly;
-struct ulog_net {
-       unsigned int nlgroup[ULOG_MAXNLGROUPS];
-       ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS];
-       struct sock *nflognl;
-       spinlock_t lock;
-};
-
-static struct ulog_net *ulog_pernet(struct net *net)
-{
-       return net_generic(net, ulog_net_id);
-}
-
-/* send one ulog_buff_t to userspace */
-static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
-{
-       ulog_buff_t *ub = &ulog->ulog_buffers[nlgroupnum];
-
-       pr_debug("ulog_send: timer is deleting\n");
-       del_timer(&ub->timer);
-
-       if (!ub->skb) {
-               pr_debug("ulog_send: nothing to send\n");
-               return;
-       }
-
-       /* last nlmsg needs NLMSG_DONE */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_type = NLMSG_DONE;
-
-       NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
-       pr_debug("throwing %d packets to netlink group %u\n",
-                ub->qlen, nlgroupnum + 1);
-       netlink_broadcast(ulog->nflognl, ub->skb, 0, nlgroupnum + 1,
-                         GFP_ATOMIC);
-
-       ub->qlen = 0;
-       ub->skb = NULL;
-       ub->lastnlh = NULL;
-}
-
-
-/* timer function to flush queue in flushtimeout time */
-static void ulog_timer(unsigned long data)
-{
-       unsigned int groupnum = *((unsigned int *)data);
-       struct ulog_net *ulog = container_of((void *)data,
-                                            struct ulog_net,
-                                            nlgroup[groupnum]);
-       pr_debug("timer function called, calling ulog_send\n");
-
-       /* lock to protect against somebody modifying our structure
-        * from ipt_ulog_target at the same time */
-       spin_lock_bh(&ulog->lock);
-       ulog_send(ulog, groupnum);
-       spin_unlock_bh(&ulog->lock);
-}
-
-static struct sk_buff *ulog_alloc_skb(unsigned int size)
-{
-       struct sk_buff *skb;
-       unsigned int n;
-
-       /* alloc skb which should be big enough for a whole
-        * multipart message. WARNING: has to be <= 131000
-        * due to slab allocator restrictions */
-
-       n = max(size, nlbufsiz);
-       skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
-       if (!skb) {
-               if (n > size) {
-                       /* try to allocate only as much as we need for
-                        * current packet */
-
-                       skb = alloc_skb(size, GFP_ATOMIC);
-                       if (!skb)
-                               pr_debug("cannot even allocate %ub\n", size);
-               }
-       }
-
-       return skb;
-}
-
-static void ipt_ulog_packet(struct net *net,
-                           unsigned int hooknum,
-                           const struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           const struct ipt_ulog_info *loginfo,
-                           const char *prefix)
-{
-       ulog_buff_t *ub;
-       ulog_packet_msg_t *pm;
-       size_t size, copy_len;
-       struct nlmsghdr *nlh;
-       struct timeval tv;
-       struct ulog_net *ulog = ulog_pernet(net);
-
-       /* ffs == find first bit set, necessary because userspace
-        * is already shifting groupnumber, but we need unshifted.
-        * ffs() returns [1..32], we need [0..31] */
-       unsigned int groupnum = ffs(loginfo->nl_group) - 1;
-
-       /* calculate the size of the skb needed */
-       if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
-               copy_len = skb->len;
-       else
-               copy_len = loginfo->copy_range;
-
-       size = nlmsg_total_size(sizeof(*pm) + copy_len);
-
-       ub = &ulog->ulog_buffers[groupnum];
-
-       spin_lock_bh(&ulog->lock);
-
-       if (!ub->skb) {
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto alloc_failure;
-       } else if (ub->qlen >= loginfo->qthreshold ||
-                  size > skb_tailroom(ub->skb)) {
-               /* either the queue len is too high or we don't have
-                * enough room in nlskb left. send it to userspace. */
-
-               ulog_send(ulog, groupnum);
-
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto alloc_failure;
-       }
-
-       pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
-
-       nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
-                       sizeof(*pm)+copy_len, 0);
-       if (!nlh) {
-               pr_debug("error during nlmsg_put\n");
-               goto out_unlock;
-       }
-       ub->qlen++;
-
-       pm = nlmsg_data(nlh);
-       memset(pm, 0, sizeof(*pm));
-
-       /* We might not have a timestamp, get one */
-       if (skb->tstamp.tv64 == 0)
-               __net_timestamp((struct sk_buff *)skb);
-
-       /* copy hook, prefix, timestamp, payload, etc. */
-       pm->data_len = copy_len;
-       tv = ktime_to_timeval(skb->tstamp);
-       put_unaligned(tv.tv_sec, &pm->timestamp_sec);
-       put_unaligned(tv.tv_usec, &pm->timestamp_usec);
-       put_unaligned(skb->mark, &pm->mark);
-       pm->hook = hooknum;
-       if (prefix != NULL) {
-               strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
-               pm->prefix[sizeof(pm->prefix) - 1] = '\0';
-       }
-       else if (loginfo->prefix[0] != '\0')
-               strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-
-       if (in && in->hard_header_len > 0 &&
-           skb->mac_header != skb->network_header &&
-           in->hard_header_len <= ULOG_MAC_LEN) {
-               memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
-               pm->mac_len = in->hard_header_len;
-       } else
-               pm->mac_len = 0;
-
-       if (in)
-               strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-
-       if (out)
-               strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-
-       /* copy_len <= skb->len, so can't fail. */
-       if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
-               BUG();
-
-       /* check if we are building multi-part messages */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
-
-       ub->lastnlh = nlh;
-
-       /* if timer isn't already running, start it */
-       if (!timer_pending(&ub->timer)) {
-               ub->timer.expires = jiffies + flushtimeout * HZ / 100;
-               add_timer(&ub->timer);
-       }
-
-       /* if threshold is reached, send message to userspace */
-       if (ub->qlen >= loginfo->qthreshold) {
-               if (loginfo->qthreshold > 1)
-                       nlh->nlmsg_type = NLMSG_DONE;
-               ulog_send(ulog, groupnum);
-       }
-out_unlock:
-       spin_unlock_bh(&ulog->lock);
-
-       return;
-
-alloc_failure:
-       pr_debug("Error building netlink message\n");
-       spin_unlock_bh(&ulog->lock);
-}
-
-static unsigned int
-ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-
-       ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
-                       par->targinfo, NULL);
-       return XT_CONTINUE;
-}
-
-static void ipt_logfn(struct net *net,
-                     u_int8_t pf,
-                     unsigned int hooknum,
-                     const struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     const struct nf_loginfo *li,
-                     const char *prefix)
-{
-       struct ipt_ulog_info loginfo;
-
-       if (!li || li->type != NF_LOG_TYPE_ULOG) {
-               loginfo.nl_group = ULOG_DEFAULT_NLGROUP;
-               loginfo.copy_range = 0;
-               loginfo.qthreshold = ULOG_DEFAULT_QTHRESHOLD;
-               loginfo.prefix[0] = '\0';
-       } else {
-               loginfo.nl_group = li->u.ulog.group;
-               loginfo.copy_range = li->u.ulog.copy_len;
-               loginfo.qthreshold = li->u.ulog.qthreshold;
-               strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
-       }
-
-       ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
-}
-
-static int ulog_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct ipt_ulog_info *loginfo = par->targinfo;
-
-       if (!par->net->xt.ulog_warn_deprecated) {
-               pr_info("ULOG is deprecated and it will be removed soon, "
-                       "use NFLOG instead\n");
-               par->net->xt.ulog_warn_deprecated = true;
-       }
-
-       if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
-               pr_debug("prefix not null-terminated\n");
-               return -EINVAL;
-       }
-       if (loginfo->qthreshold > ULOG_MAX_QLEN) {
-               pr_debug("queue threshold %Zu > MAX_QLEN\n",
-                        loginfo->qthreshold);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_ipt_ulog_info {
-       compat_uint_t   nl_group;
-       compat_size_t   copy_range;
-       compat_size_t   qthreshold;
-       char            prefix[ULOG_PREFIX_LEN];
-};
-
-static void ulog_tg_compat_from_user(void *dst, const void *src)
-{
-       const struct compat_ipt_ulog_info *cl = src;
-       struct ipt_ulog_info l = {
-               .nl_group       = cl->nl_group,
-               .copy_range     = cl->copy_range,
-               .qthreshold     = cl->qthreshold,
-       };
-
-       memcpy(l.prefix, cl->prefix, sizeof(l.prefix));
-       memcpy(dst, &l, sizeof(l));
-}
-
-static int ulog_tg_compat_to_user(void __user *dst, const void *src)
-{
-       const struct ipt_ulog_info *l = src;
-       struct compat_ipt_ulog_info cl = {
-               .nl_group       = l->nl_group,
-               .copy_range     = l->copy_range,
-               .qthreshold     = l->qthreshold,
-       };
-
-       memcpy(cl.prefix, l->prefix, sizeof(cl.prefix));
-       return copy_to_user(dst, &cl, sizeof(cl)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_target ulog_tg_reg __read_mostly = {
-       .name           = "ULOG",
-       .family         = NFPROTO_IPV4,
-       .target         = ulog_tg,
-       .targetsize     = sizeof(struct ipt_ulog_info),
-       .checkentry     = ulog_tg_check,
-#ifdef CONFIG_COMPAT
-       .compatsize     = sizeof(struct compat_ipt_ulog_info),
-       .compat_from_user = ulog_tg_compat_from_user,
-       .compat_to_user = ulog_tg_compat_to_user,
-#endif
-       .me             = THIS_MODULE,
-};
-
-static struct nf_logger ipt_ulog_logger __read_mostly = {
-       .name           = "ipt_ULOG",
-       .logfn          = ipt_logfn,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ulog_tg_net_init(struct net *net)
-{
-       int i;
-       struct ulog_net *ulog = ulog_pernet(net);
-       struct netlink_kernel_cfg cfg = {
-               .groups = ULOG_MAXNLGROUPS,
-       };
-
-       spin_lock_init(&ulog->lock);
-       /* initialize ulog_buffers */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
-               ulog->nlgroup[i] = i;
-               setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
-                           (unsigned long)&ulog->nlgroup[i]);
-       }
-
-       ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
-       if (!ulog->nflognl)
-               return -ENOMEM;
-
-       if (nflog)
-               nf_log_set(net, NFPROTO_IPV4, &ipt_ulog_logger);
-
-       return 0;
-}
-
-static void __net_exit ulog_tg_net_exit(struct net *net)
-{
-       ulog_buff_t *ub;
-       int i;
-       struct ulog_net *ulog = ulog_pernet(net);
-
-       if (nflog)
-               nf_log_unset(net, &ipt_ulog_logger);
-
-       netlink_kernel_release(ulog->nflognl);
-
-       /* remove pending timers and free allocated skb's */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
-               ub = &ulog->ulog_buffers[i];
-               pr_debug("timer is deleting\n");
-               del_timer(&ub->timer);
-
-               if (ub->skb) {
-                       kfree_skb(ub->skb);
-                       ub->skb = NULL;
-               }
-       }
-}
-
-static struct pernet_operations ulog_tg_net_ops = {
-       .init = ulog_tg_net_init,
-       .exit = ulog_tg_net_exit,
-       .id   = &ulog_net_id,
-       .size = sizeof(struct ulog_net),
-};
-
-static int __init ulog_tg_init(void)
-{
-       int ret;
-       pr_debug("init module\n");
-
-       if (nlbufsiz > 128*1024) {
-               pr_warn("Netlink buffer has to be <= 128kB\n");
-               return -EINVAL;
-       }
-
-       ret = register_pernet_subsys(&ulog_tg_net_ops);
-       if (ret)
-               goto out_pernet;
-
-       ret = xt_register_target(&ulog_tg_reg);
-       if (ret < 0)
-               goto out_target;
-
-       if (nflog)
-               nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger);
-
-       return 0;
-
-out_target:
-       unregister_pernet_subsys(&ulog_tg_net_ops);
-out_pernet:
-       return ret;
-}
-
-static void __exit ulog_tg_exit(void)
-{
-       pr_debug("cleanup_module\n");
-       if (nflog)
-               nf_log_unregister(&ipt_ulog_logger);
-       xt_unregister_target(&ulog_tg_reg);
-       unregister_pernet_subsys(&ulog_tg_net_ops);
-}
-
-module_init(ulog_tg_init);
-module_exit(ulog_tg_exit);
index 8127dc802865c2e992de63ae5d63837f3fd73fd6..4ce44c4bc57b7665d4b576294a46dcd73aafd443 100644 (file)
@@ -314,7 +314,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        return -ENOENT;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -388,7 +388,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
        .invert_tuple    = ipv4_invert_tuple,
        .print_tuple     = ipv4_print_tuple,
        .get_l4proto     = ipv4_get_l4proto,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr = ipv4_tuple_to_nlattr,
        .nlattr_tuple_size = ipv4_nlattr_tuple_size,
        .nlattr_to_tuple = ipv4_nlattr_to_tuple,
index a338dad41b7d8b70d755c3e149db8916615dd89a..b91b2641adda6b2f35768f307ef6c13c10a8d351 100644 (file)
@@ -226,7 +226,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
        return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -408,7 +408,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
        .error                  = icmp_error,
        .destroy                = NULL,
        .me                     = NULL,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = icmp_tuple_to_nlattr,
        .nlattr_tuple_size      = icmp_nlattr_tuple_size,
        .nlattr_to_tuple        = icmp_nlattr_to_tuple,
index b8f6381c7d0b15f49973a3748937ea23325bee03..76bd1aef257f213d04b3f4393f3705cc4863558d 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
@@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 {
        u16 zone = NF_CT_DEFAULT_ZONE;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
@@ -74,8 +74,8 @@ static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
            inet->nodefrag)
                return NF_ACCEPT;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#if !IS_ENABLED(CONFIG_NF_NAT)
        /* Previously seen (loopback)?  Ignore.  Do this before
           fragment check. */
        if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
new file mode 100644 (file)
index 0000000..ccfc78d
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Based on code from ebt_log from:
+ *
+ * Bart De Schuymer <bdschuym@pandora.be>
+ * Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+struct arppayload {
+       unsigned char mac_src[ETH_ALEN];
+       unsigned char ip_src[4];
+       unsigned char mac_dst[ETH_ALEN];
+       unsigned char ip_dst[4];
+};
+
+static void dump_arp_packet(struct nf_log_buf *m,
+                           const struct nf_loginfo *info,
+                           const struct sk_buff *skb, unsigned int nhoff)
+{
+       const struct arphdr *ah;
+       struct arphdr _arph;
+       const struct arppayload *ap;
+       struct arppayload _arpp;
+
+       ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+       if (ah == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+       nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
+                      ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
+
+       /* If it's for Ethernet and the lengths are OK, then log the ARP
+        * payload.
+        */
+       if (ah->ar_hrd != htons(1) ||
+           ah->ar_hln != ETH_ALEN ||
+           ah->ar_pln != sizeof(__be32))
+               return;
+
+       ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
+       if (ap == NULL) {
+               nf_log_buf_add(m, " INCOMPLETE [%Zu bytes]",
+                              skb->len - sizeof(_arph));
+               return;
+       }
+       nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4",
+                      ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
+}
+
+void nf_log_arp_packet(struct net *net, u_int8_t pf,
+                     unsigned int hooknum, const struct sk_buff *skb,
+                     const struct net_device *in,
+                     const struct net_device *out,
+                     const struct nf_loginfo *loginfo,
+                     const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+                                 prefix);
+       dump_arp_packet(m, loginfo, skb, 0);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_arp_logger __read_mostly = {
+       .name           = "nf_log_arp",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_arp_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_arp_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_arp_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_arp_logger);
+}
+
+static struct pernet_operations nf_log_arp_net_ops = {
+       .init = nf_log_arp_net_init,
+       .exit = nf_log_arp_net_exit,
+};
+
+static int __init nf_log_arp_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_arp_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_ARP, &nf_arp_logger);
+       return 0;
+}
+
+static void __exit nf_log_arp_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_arp_net_ops);
+       nf_log_unregister(&nf_arp_logger);
+}
+
+module_init(nf_log_arp_init);
+module_exit(nf_log_arp_exit);
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter ARP packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(3, 0);
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
new file mode 100644 (file)
index 0000000..078bdca
--- /dev/null
@@ -0,0 +1,385 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+/* One level of recursion won't kill us */
+static void dump_ipv4_packet(struct nf_log_buf *m,
+                            const struct nf_loginfo *info,
+                            const struct sk_buff *skb, unsigned int iphoff)
+{
+       struct iphdr _iph;
+       const struct iphdr *ih;
+       unsigned int logflags;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+       else
+               logflags = NF_LOG_MASK;
+
+       ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
+       if (ih == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+
+       /* Important fields:
+        * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
+       /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
+       nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr);
+
+       /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+       nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+                      ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
+                      ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
+
+       /* Max length: 6 "CE DF MF " */
+       if (ntohs(ih->frag_off) & IP_CE)
+               nf_log_buf_add(m, "CE ");
+       if (ntohs(ih->frag_off) & IP_DF)
+               nf_log_buf_add(m, "DF ");
+       if (ntohs(ih->frag_off) & IP_MF)
+               nf_log_buf_add(m, "MF ");
+
+       /* Max length: 11 "FRAG:65535 " */
+       if (ntohs(ih->frag_off) & IP_OFFSET)
+               nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
+
+       if ((logflags & XT_LOG_IPOPT) &&
+           ih->ihl * 4 > sizeof(struct iphdr)) {
+               const unsigned char *op;
+               unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
+               unsigned int i, optsize;
+
+               optsize = ih->ihl * 4 - sizeof(struct iphdr);
+               op = skb_header_pointer(skb, iphoff+sizeof(_iph),
+                                       optsize, _opt);
+               if (op == NULL) {
+                       nf_log_buf_add(m, "TRUNCATED");
+                       return;
+               }
+
+               /* Max length: 127 "OPT (" 15*4*2chars ") " */
+               nf_log_buf_add(m, "OPT (");
+               for (i = 0; i < optsize; i++)
+                       nf_log_buf_add(m, "%02X", op[i]);
+               nf_log_buf_add(m, ") ");
+       }
+
+       switch (ih->protocol) {
+       case IPPROTO_TCP:
+               if (nf_log_dump_tcp_header(m, skb, ih->protocol,
+                                          ntohs(ih->frag_off) & IP_OFFSET,
+                                          iphoff+ih->ihl*4, logflags))
+                       return;
+               break;
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+               if (nf_log_dump_udp_header(m, skb, ih->protocol,
+                                          ntohs(ih->frag_off) & IP_OFFSET,
+                                          iphoff+ih->ihl*4))
+                       return;
+               break;
+       case IPPROTO_ICMP: {
+               struct icmphdr _icmph;
+               const struct icmphdr *ich;
+               static const size_t required_len[NR_ICMP_TYPES+1]
+                       = { [ICMP_ECHOREPLY] = 4,
+                           [ICMP_DEST_UNREACH]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_SOURCE_QUENCH]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_REDIRECT]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_ECHO] = 4,
+                           [ICMP_TIME_EXCEEDED]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_PARAMETERPROB]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_TIMESTAMP] = 20,
+                           [ICMP_TIMESTAMPREPLY] = 20,
+                           [ICMP_ADDRESS] = 12,
+                           [ICMP_ADDRESSREPLY] = 12 };
+
+               /* Max length: 11 "PROTO=ICMP " */
+               nf_log_buf_add(m, "PROTO=ICMP ");
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
+                                        sizeof(_icmph), &_icmph);
+               if (ich == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Max length: 18 "TYPE=255 CODE=255 " */
+               nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               if (ich->type <= NR_ICMP_TYPES &&
+                   required_len[ich->type] &&
+                   skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               switch (ich->type) {
+               case ICMP_ECHOREPLY:
+               case ICMP_ECHO:
+                       /* Max length: 19 "ID=65535 SEQ=65535 " */
+                       nf_log_buf_add(m, "ID=%u SEQ=%u ",
+                                      ntohs(ich->un.echo.id),
+                                      ntohs(ich->un.echo.sequence));
+                       break;
+
+               case ICMP_PARAMETERPROB:
+                       /* Max length: 14 "PARAMETER=255 " */
+                       nf_log_buf_add(m, "PARAMETER=%u ",
+                                      ntohl(ich->un.gateway) >> 24);
+                       break;
+               case ICMP_REDIRECT:
+                       /* Max length: 24 "GATEWAY=255.255.255.255 " */
+                       nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
+                       /* Fall through */
+               case ICMP_DEST_UNREACH:
+               case ICMP_SOURCE_QUENCH:
+               case ICMP_TIME_EXCEEDED:
+                       /* Max length: 3+maxlen */
+                       if (!iphoff) { /* Only recurse once. */
+                               nf_log_buf_add(m, "[");
+                               dump_ipv4_packet(m, info, skb,
+                                           iphoff + ih->ihl*4+sizeof(_icmph));
+                               nf_log_buf_add(m, "] ");
+                       }
+
+                       /* Max length: 10 "MTU=65535 " */
+                       if (ich->type == ICMP_DEST_UNREACH &&
+                           ich->code == ICMP_FRAG_NEEDED) {
+                               nf_log_buf_add(m, "MTU=%u ",
+                                              ntohs(ich->un.frag.mtu));
+                       }
+               }
+               break;
+       }
+       /* Max Length */
+       case IPPROTO_AH: {
+               struct ip_auth_hdr _ahdr;
+               const struct ip_auth_hdr *ah;
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 9 "PROTO=AH " */
+               nf_log_buf_add(m, "PROTO=AH ");
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
+                                       sizeof(_ahdr), &_ahdr);
+               if (ah == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Length: 15 "SPI=0xF1234567 " */
+               nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
+               break;
+       }
+       case IPPROTO_ESP: {
+               struct ip_esp_hdr _esph;
+               const struct ip_esp_hdr *eh;
+
+               /* Max length: 10 "PROTO=ESP " */
+               nf_log_buf_add(m, "PROTO=ESP ");
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
+                                       sizeof(_esph), &_esph);
+               if (eh == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Length: 15 "SPI=0xF1234567 " */
+               nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi));
+               break;
+       }
+       /* Max length: 10 "PROTO 255 " */
+       default:
+               nf_log_buf_add(m, "PROTO=%u ", ih->protocol);
+       }
+
+       /* Max length: 15 "UID=4294967295 " */
+       if ((logflags & XT_LOG_UID) && !iphoff)
+               nf_log_dump_sk_uid_gid(m, skb->sk);
+
+       /* Max length: 16 "MARK=0xFFFFFFFF " */
+       if (!iphoff && skb->mark)
+               nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
+
+       /* Proto    Max log string length */
+       /* IP:      40+46+6+11+127 = 230 */
+       /* TCP:     10+max(25,20+30+13+9+32+11+127) = 252 */
+       /* UDP:     10+max(25,20) = 35 */
+       /* UDPLITE: 14+max(25,20) = 39 */
+       /* ICMP:    11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
+       /* ESP:     10+max(25)+15 = 50 */
+       /* AH:      9+max(25)+15 = 49 */
+       /* unknown: 10 */
+
+       /* (ICMP allows recursion one level deep) */
+       /* maxlen =  IP + ICMP +  IP + max(TCP,UDP,ICMP,unknown) */
+       /* maxlen = 230+   91  + 230 + 252 = 803 */
+}
+
+static void dump_ipv4_mac_header(struct nf_log_buf *m,
+                           const struct nf_loginfo *info,
+                           const struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       unsigned int logflags = 0;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+
+       if (!(logflags & XT_LOG_MACDECODE))
+               goto fallback;
+
+       switch (dev->type) {
+       case ARPHRD_ETHER:
+               nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+                              eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                              ntohs(eth_hdr(skb)->h_proto));
+               return;
+       default:
+               break;
+       }
+
+fallback:
+       nf_log_buf_add(m, "MAC=");
+       if (dev->hard_header_len &&
+           skb->mac_header != skb->network_header) {
+               const unsigned char *p = skb_mac_header(skb);
+               unsigned int i;
+
+               nf_log_buf_add(m, "%02x", *p++);
+               for (i = 1; i < dev->hard_header_len; i++, p++)
+                       nf_log_buf_add(m, ":%02x", *p);
+       }
+       nf_log_buf_add(m, " ");
+}
+
+static void nf_log_ip_packet(struct net *net, u_int8_t pf,
+                            unsigned int hooknum, const struct sk_buff *skb,
+                            const struct net_device *in,
+                            const struct net_device *out,
+                            const struct nf_loginfo *loginfo,
+                            const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in,
+                                 out, loginfo, prefix);
+
+       if (in != NULL)
+               dump_ipv4_mac_header(m, loginfo, skb);
+
+       dump_ipv4_packet(m, loginfo, skb, 0);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_ip_logger __read_mostly = {
+       .name           = "nf_log_ipv4",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_ip_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_ipv4_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_ipv4_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_ip_logger);
+}
+
+static struct pernet_operations nf_log_ipv4_net_ops = {
+       .init = nf_log_ipv4_net_init,
+       .exit = nf_log_ipv4_net_exit,
+};
+
+static int __init nf_log_ipv4_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_ipv4_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
+       return 0;
+}
+
+static void __exit nf_log_ipv4_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_ipv4_net_ops);
+       nf_log_unregister(&nf_ip_logger);
+}
+
+module_init(nf_log_ipv4_init);
+module_exit(nf_log_ipv4_exit);
+
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_INET, 0);
index d8b2e14efddc0d6a0c4ab8bc968ba84b29590fa7..14f5ccd063378a0b62162710aeba4a0dc229541c 100644 (file)
@@ -154,6 +154,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
                                         htons(oldlen), htons(datalen), 1);
 }
 
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
                                       struct nf_nat_range *range)
 {
@@ -169,6 +170,7 @@ static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
 
        return 0;
 }
+#endif
 
 static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
        .l3proto                = NFPROTO_IPV4,
@@ -177,7 +179,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
        .manip_pkt              = nf_nat_ipv4_manip_pkt,
        .csum_update            = nf_nat_ipv4_csum_update,
        .csum_recalc            = nf_nat_ipv4_csum_recalc,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_ipv4_nlattr_to_range,
+#endif
 #ifdef CONFIG_XFRM
        .decode_session         = nf_nat_ipv4_decode_session,
 #endif
index 690d890111bbdf0c254360e2dfb14d60067810a1..9414923f1e156939bbd68acf22ca76d2e7e34698 100644 (file)
@@ -124,7 +124,7 @@ static const struct nf_nat_l4proto gre = {
        .manip_pkt              = gre_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = gre_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index eb303471bcf6c252c2017061fd866e4ccf6a1fd1..4557b4ab8342740696b5fa4d3de8c6218ed70186 100644 (file)
@@ -77,7 +77,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
        .manip_pkt              = icmp_manip_pkt,
        .in_range               = icmp_in_range,
        .unique_tuple           = icmp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 2c65160565e1a084b5ff13832cfdb80d50ba7d6c..2054d7136c62c8afce519dfb5b5799397b062764 100644 (file)
@@ -365,6 +365,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+
        skb->transport_header = skb->network_header;
        err = -EFAULT;
        if (memcpy_fromiovecend((void *)iph, from, 0, length))
@@ -606,6 +608,8 @@ back_from_confirm:
                                      &rt, msg->msg_flags);
 
         else {
+               sock_tx_timestamp(sk, &ipc.tx_flags);
+
                if (!ipc.addr)
                        ipc.addr = fl4.daddr;
                lock_sock(sk);
index c86624b36a62ece1dd34bf39561d52e34f467bd3..c0c75688896e06bd1b64384c94e7db758f842d0b 100644 (file)
@@ -170,7 +170,8 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mssp)
 {
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
index 40639c288dc229d205eccb257886d8867d973759..7832d941dbcda412cef6e0cfcaef446c8c147854 100644 (file)
@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
  *     losses and/or application stalls), do not perform any further cwnd
  *     reductions, but instead slow start up to ssthresh.
  */
-static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
+static void tcp_init_cwnd_reduction(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2485,8 +2485,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
        tp->prior_cwnd = tp->snd_cwnd;
        tp->prr_delivered = 0;
        tp->prr_out = 0;
-       if (set_ssthresh)
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+       tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
        TCP_ECN_queue_cwr(tp);
 }
 
@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 }
 
 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+void tcp_enter_cwr(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        tp->prior_ssthresh = 0;
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
-               tcp_init_cwnd_reduction(sk, set_ssthresh);
+               tcp_init_cwnd_reduction(sk);
                tcp_set_ca_state(sk, TCP_CA_CWR);
        }
 }
@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
                tp->retrans_stamp = 0;
 
        if (flag & FLAG_ECE)
-               tcp_enter_cwr(sk, 1);
+               tcp_enter_cwr(sk);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
@@ -2670,7 +2669,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tcp_init_cwnd_reduction(sk, true);
+               tcp_init_cwnd_reduction(sk);
        }
        tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                tp->tlp_high_seq = 0;
                /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
                if (!(flag & FLAG_DSACKING_ACK)) {
-                       tcp_init_cwnd_reduction(sk, true);
+                       tcp_init_cwnd_reduction(sk);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
                        tcp_try_keep_open(sk);
@@ -5877,3 +5876,153 @@ discard:
        return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       if (family == AF_INET)
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+                              &ireq->ir_rmt_addr, port);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (family == AF_INET6)
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+                              &ireq->ir_v6_rmt_addr, port);
+#endif
+}
+
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                    const struct tcp_request_sock_ops *af_ops,
+                    struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_options_received tmp_opt;
+       struct request_sock *req;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct dst_entry *dst = NULL;
+       __u32 isn = TCP_SKB_CB(skb)->when;
+       bool want_cookie = false, fastopen;
+       struct flowi fl;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       int err;
+
+
+       /* TW buckets are converted to open requests without
+        * limitations, they conserve resources and peer is
+        * evidently real one.
+        */
+       if ((sysctl_tcp_syncookies == 2 ||
+            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+               want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+               if (!want_cookie)
+                       goto drop;
+       }
+
+
+       /* Accept backlog is full. If we have already queued enough
+        * of warm entries in syn queue, drop request. It is better than
+        * clogging syn queue with openreqs with exponentially increasing
+        * timeout.
+        */
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+               goto drop;
+       }
+
+       req = inet_reqsk_alloc(rsk_ops);
+       if (!req)
+               goto drop;
+
+       tcp_rsk(req)->af_specific = af_ops;
+
+       tcp_clear_options(&tmp_opt);
+       tmp_opt.mss_clamp = af_ops->mss_clamp;
+       tmp_opt.user_mss  = tp->rx_opt.user_mss;
+       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
+       if (want_cookie && !tmp_opt.saw_tstamp)
+               tcp_clear_options(&tmp_opt);
+
+       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+       tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+       af_ops->init_req(req, sk, skb);
+
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
+       if (!want_cookie || tmp_opt.tstamp_ok)
+               TCP_ECN_create_request(req, skb, sock_net(sk));
+
+       if (want_cookie) {
+               isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+               req->cookie_ts = tmp_opt.tstamp_ok;
+       } else if (!isn) {
+               /* VJ's idea. We save last timestamp seen
+                * from the destination in peer table, when entering
+                * state TIME-WAIT, and check against it before
+                * accepting new connection request.
+                *
+                * If "isn" is not zero, this request hit alive
+                * timewait bucket, so that all the necessary checks
+                * are made in the function processing timewait state.
+                */
+               if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+                       bool strict;
+
+                       dst = af_ops->route_req(sk, &fl, req, &strict);
+                       if (dst && strict &&
+                           !tcp_peer_is_proven(req, dst, true)) {
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                               goto drop_and_release;
+                       }
+               }
+               /* Kill the following clause, if you dislike this way. */
+               else if (!sysctl_tcp_syncookies &&
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (sysctl_max_syn_backlog >> 2)) &&
+                        !tcp_peer_is_proven(req, dst, false)) {
+                       /* Without syncookies last quarter of
+                        * backlog is filled with destinations,
+                        * proven to be alive.
+                        * It means that we continue to communicate
+                        * to destinations, already remembered
+                        * to the moment of synflood.
+                        */
+                       pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+                                   rsk_ops->family);
+                       goto drop_and_release;
+               }
+
+               isn = af_ops->init_seq(skb);
+       }
+       if (!dst) {
+               dst = af_ops->route_req(sk, &fl, req, NULL);
+               if (!dst)
+                       goto drop_and_free;
+       }
+
+       tcp_rsk(req)->snt_isn = isn;
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = af_ops->send_synack(sk, dst, &fl, req,
+                                 skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->listener = NULL;
+               af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       }
+
+       return 0;
+
+drop_and_release:
+       dst_release(dst);
+drop_and_free:
+       reqsk_free(req);
+drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
index 77cccda1ad0c6dc62c8cb70d932eca2322304c81..1edc739b9da59e29decbc304e9c5dd037634bf09 100644 (file)
@@ -99,7 +99,7 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
+static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 {
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
@@ -208,6 +208,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;
 
+       inet_set_txhash(sk);
+
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -814,6 +816,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  *     socket.
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+                             struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc)
@@ -837,24 +840,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
-               if (!tcp_rsk(req)->snt_synack && !err)
-                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
 }
 
-static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-       int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
-
-       if (!res) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-       }
-       return res;
-}
-
 /*
  *     IPv4 request_sock destructor.
  */
@@ -1237,161 +1227,68 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
 #endif
 
+static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
+                           struct sk_buff *skb)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+       ireq->no_srccheck = inet_sk(sk)->transparent;
+       ireq->opt = tcp_v4_save_options(skb);
+}
+
+static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
+                                         const struct request_sock *req,
+                                         bool *strict)
+{
+       struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
+
+       if (strict) {
+               if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
+                       *strict = true;
+               else
+                       *strict = false;
+       }
+
+       return dst;
+}
+
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
-       .rtx_syn_ack    =       tcp_v4_rtx_synack,
+       .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+       .mss_clamp      =       TCP_MSS_DEFAULT,
+#ifdef CONFIG_TCP_MD5SIG
        .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
-};
 #endif
+       .init_req       =       tcp_v4_init_req,
+#ifdef CONFIG_SYN_COOKIES
+       .cookie_init_seq =      cookie_v4_init_sequence,
+#endif
+       .route_req      =       tcp_v4_route_req,
+       .init_seq       =       tcp_v4_init_sequence,
+       .send_synack    =       tcp_v4_send_synack,
+       .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
+};
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_options_received tmp_opt;
-       struct request_sock *req;
-       struct inet_request_sock *ireq;
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct dst_entry *dst = NULL;
-       __be32 saddr = ip_hdr(skb)->saddr;
-       __be32 daddr = ip_hdr(skb)->daddr;
-       __u32 isn = TCP_SKB_CB(skb)->when;
-       bool want_cookie = false, fastopen;
-       struct flowi4 fl4;
-       struct tcp_fastopen_cookie foc = { .len = -1 };
-       int err;
-
        /* Never answer to SYNs send to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;
 
-       /* TW buckets are converted to open requests without
-        * limitations, they conserve resources and peer is
-        * evidently real one.
-        */
-       if ((sysctl_tcp_syncookies == 2 ||
-            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-               want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
-               if (!want_cookie)
-                       goto drop;
-       }
-
-       /* Accept backlog is full. If we have already queued enough
-        * of warm entries in syn queue, drop request. It is better than
-        * clogging syn queue with openreqs with exponentially increasing
-        * timeout.
-        */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-               goto drop;
-       }
-
-       req = inet_reqsk_alloc(&tcp_request_sock_ops);
-       if (!req)
-               goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-       tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
-#endif
-
-       tcp_clear_options(&tmp_opt);
-       tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
-       tmp_opt.user_mss  = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-       if (want_cookie && !tmp_opt.saw_tstamp)
-               tcp_clear_options(&tmp_opt);
-
-       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-       tcp_openreq_init(req, &tmp_opt, skb);
-
-       ireq = inet_rsk(req);
-       ireq->ir_loc_addr = daddr;
-       ireq->ir_rmt_addr = saddr;
-       ireq->no_srccheck = inet_sk(sk)->transparent;
-       ireq->opt = tcp_v4_save_options(skb);
-       ireq->ir_mark = inet_request_mark(sk, skb);
-
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_free;
-
-       if (!want_cookie || tmp_opt.tstamp_ok)
-               TCP_ECN_create_request(req, skb, sock_net(sk));
-
-       if (want_cookie) {
-               isn = cookie_v4_init_sequence(sk, skb, &req->mss);
-               req->cookie_ts = tmp_opt.tstamp_ok;
-       } else if (!isn) {
-               /* VJ's idea. We save last timestamp seen
-                * from the destination in peer table, when entering
-                * state TIME-WAIT, and check against it before
-                * accepting new connection request.
-                *
-                * If "isn" is not zero, this request hit alive
-                * timewait bucket, so that all the necessary checks
-                * are made in the function processing timewait state.
-                */
-               if (tmp_opt.saw_tstamp &&
-                   tcp_death_row.sysctl_tw_recycle &&
-                   (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
-                   fl4.daddr == saddr) {
-                       if (!tcp_peer_is_proven(req, dst, true)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                               goto drop_and_release;
-                       }
-               }
-               /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
-                        !tcp_peer_is_proven(req, dst, false)) {
-                       /* Without syncookies last quarter of
-                        * backlog is filled with destinations,
-                        * proven to be alive.
-                        * It means that we continue to communicate
-                        * to destinations, already remembered
-                        * to the moment of synflood.
-                        */
-                       LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
-                                      &saddr, ntohs(tcp_hdr(skb)->source));
-                       goto drop_and_release;
-               }
+       return tcp_conn_request(&tcp_request_sock_ops,
+                               &tcp_request_sock_ipv4_ops, sk, skb);
 
-               isn = tcp_v4_init_sequence(skb);
-       }
-       if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
-               goto drop_and_free;
-
-       tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_openreq_init_rwin(req, sk, dst);
-       fastopen = !want_cookie &&
-                  tcp_try_fastopen(sk, skb, req, &foc, dst);
-       err = tcp_v4_send_synack(sk, dst, req,
-                                skb_get_queue_mapping(skb), &foc);
-       if (!fastopen) {
-               if (err || want_cookie)
-                       goto drop_and_free;
-
-               tcp_rsk(req)->snt_synack = tcp_time_stamp;
-               tcp_rsk(req)->listener = NULL;
-               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-       }
-
-       return 0;
-
-drop_and_release:
-       dst_release(dst);
-drop_and_free:
-       reqsk_free(req);
 drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
@@ -1439,6 +1336,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
+       inet_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
index e68e0d4af6c97bcd0f8c983890ba555adbfb3a00..1649988bd1b632f09506da9e685dc643c64161fd 100644 (file)
@@ -298,7 +298,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = np->flow_label >> 12;
-                       tw->tw_ipv6only = np->ipv6only;
+                       tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
 
index 179b51e6bda339f37a386d5118e1f15f41bbccf7..8fcfc91964ecb5226d65ef24974eee70fe225bbc 100644 (file)
@@ -916,6 +916,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = tcp_wfree;
+       skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
@@ -978,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (likely(err <= 0))
                return err;
 
-       tcp_enter_cwr(sk, 1);
+       tcp_enter_cwr(sk);
 
        return net_xmit_eval(err);
 }
@@ -3301,3 +3302,18 @@ void tcp_send_probe0(struct sock *sk)
                                          TCP_RTO_MAX);
        }
 }
+
+int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+{
+       const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
+       struct flowi fl;
+       int res;
+
+       res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
+       if (!res) {
+               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+       }
+       return res;
+}
+EXPORT_SYMBOL(tcp_rtx_synack);
index 7d5a8661df769d95e05c8214ecd5afc8f0144d26..f31053b90ee069d38942ef5d69f459d5ac6da0b1 100644 (file)
@@ -594,27 +594,6 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
        return true;
 }
 
-static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
-                                            __be16 loc_port, __be32 loc_addr,
-                                            __be16 rmt_port, __be32 rmt_addr,
-                                            int dif)
-{
-       struct hlist_nulls_node *node;
-       struct sock *s = sk;
-       unsigned short hnum = ntohs(loc_port);
-
-       sk_nulls_for_each_from(s, node) {
-               if (__udp_is_mcast_sock(net, s,
-                                       loc_port, loc_addr,
-                                       rmt_port, rmt_addr,
-                                       dif, hnum))
-                       goto found;
-       }
-       s = NULL;
-found:
-       return s;
-}
-
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -1640,6 +1619,8 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
                if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
+
+               sock_put(sk);
        }
        if (unlikely(skb1))
                kfree_skb(skb1);
@@ -1668,41 +1649,50 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                    struct udp_table *udptable)
 {
        struct sock *sk, *stack[256 / sizeof(struct sock *)];
-       struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
-       int dif;
-       unsigned int i, count = 0;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(uh->dest);
+       struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+       int dif = skb->dev->ifindex;
+       unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+       unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+
+       if (use_hash2) {
+               hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+                           udp_table.mask;
+               hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+start_lookup:
+               hslot = &udp_table.hash2[hash2];
+               offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
+       }
 
        spin_lock(&hslot->lock);
-       sk = sk_nulls_head(&hslot->head);
-       dif = skb->dev->ifindex;
-       sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-       while (sk) {
-               stack[count++] = sk;
-               sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
-                                      daddr, uh->source, saddr, dif);
-               if (unlikely(count == ARRAY_SIZE(stack))) {
-                       if (!sk)
-                               break;
-                       flush_stack(stack, count, skb, ~0);
-                       count = 0;
+       sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
+               if (__udp_is_mcast_sock(net, sk,
+                                       uh->dest, daddr,
+                                       uh->source, saddr,
+                                       dif, hnum)) {
+                       if (unlikely(count == ARRAY_SIZE(stack))) {
+                               flush_stack(stack, count, skb, ~0);
+                               count = 0;
+                       }
+                       stack[count++] = sk;
+                       sock_hold(sk);
                }
        }
-       /*
-        * before releasing chain lock, we must take a reference on sockets
-        */
-       for (i = 0; i < count; i++)
-               sock_hold(stack[i]);
 
        spin_unlock(&hslot->lock);
 
+       /* Also lookup *:port if we are using hash2 and haven't done so yet. */
+       if (use_hash2 && hash2 != hash2_any) {
+               hash2 = hash2_any;
+               goto start_lookup;
+       }
+
        /*
         * do the slow work with no lock held
         */
        if (count) {
                flush_stack(stack, count, skb, count - 1);
-
-               for (i = 0; i < count; i++)
-                       sock_put(stack[i]);
        } else {
                kfree_skb(skb);
        }
@@ -2526,79 +2516,3 @@ void __init udp_init(void)
        sysctl_udp_rmem_min = SK_MEM_QUANTUM;
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;
 }
-
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-                                      netdev_features_t features)
-{
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
-       u16 mac_offset = skb->mac_header;
-       int mac_len = skb->mac_len;
-       int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-       __be16 protocol = skb->protocol;
-       netdev_features_t enc_features;
-       int udp_offset, outer_hlen;
-       unsigned int oldlen;
-       bool need_csum;
-
-       oldlen = (u16)~skb->len;
-
-       if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
-               goto out;
-
-       skb->encapsulation = 0;
-       __skb_pull(skb, tnl_hlen);
-       skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb_inner_network_offset(skb));
-       skb->mac_len = skb_inner_network_offset(skb);
-       skb->protocol = htons(ETH_P_TEB);
-
-       need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-       if (need_csum)
-               skb->encap_hdr_csum = 1;
-
-       /* segment inner packet. */
-       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
-       segs = skb_mac_gso_segment(skb, enc_features);
-       if (!segs || IS_ERR(segs)) {
-               skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
-                                    mac_len);
-               goto out;
-       }
-
-       outer_hlen = skb_tnl_header_len(skb);
-       udp_offset = outer_hlen - tnl_hlen;
-       skb = segs;
-       do {
-               struct udphdr *uh;
-               int len;
-
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-
-               skb->mac_len = mac_len;
-
-               skb_push(skb, outer_hlen);
-               skb_reset_mac_header(skb);
-               skb_set_network_header(skb, mac_len);
-               skb_set_transport_header(skb, udp_offset);
-               len = skb->len - udp_offset;
-               uh = udp_hdr(skb);
-               uh->len = htons(len);
-
-               if (need_csum) {
-                       __be32 delta = htonl(oldlen + len);
-
-                       uh->check = ~csum_fold((__force __wsum)
-                                              ((__force u32)uh->check +
-                                               (__force u32)delta));
-                       uh->check = gso_make_checksum(skb, ~uh->check);
-
-                       if (uh->check == 0)
-                               uh->check = CSUM_MANGLED_0;
-               }
-
-               skb->protocol = protocol;
-       } while ((skb = skb->next));
-out:
-       return segs;
-}
index 546d2d439dda65a195f7e635b7dc564def1da077..4807544d018bd52af5da86e7388853fb04c57ce9 100644 (file)
@@ -47,6 +47,82 @@ static int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
+       int mac_len = skb->mac_len;
+       int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
+       __be16 protocol = skb->protocol;
+       netdev_features_t enc_features;
+       int udp_offset, outer_hlen;
+       unsigned int oldlen;
+       bool need_csum;
+
+       oldlen = (u16)~skb->len;
+
+       if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
+               goto out;
+
+       skb->encapsulation = 0;
+       __skb_pull(skb, tnl_hlen);
+       skb_reset_mac_header(skb);
+       skb_set_network_header(skb, skb_inner_network_offset(skb));
+       skb->mac_len = skb_inner_network_offset(skb);
+       skb->protocol = htons(ETH_P_TEB);
+
+       need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
+       if (need_csum)
+               skb->encap_hdr_csum = 1;
+
+       /* segment inner packet. */
+       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       segs = skb_mac_gso_segment(skb, enc_features);
+       if (!segs || IS_ERR(segs)) {
+               skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+                                    mac_len);
+               goto out;
+       }
+
+       outer_hlen = skb_tnl_header_len(skb);
+       udp_offset = outer_hlen - tnl_hlen;
+       skb = segs;
+       do {
+               struct udphdr *uh;
+               int len;
+
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+
+               skb->mac_len = mac_len;
+
+               skb_push(skb, outer_hlen);
+               skb_reset_mac_header(skb);
+               skb_set_network_header(skb, mac_len);
+               skb_set_transport_header(skb, udp_offset);
+               len = skb->len - udp_offset;
+               uh = udp_hdr(skb);
+               uh->len = htons(len);
+
+               if (need_csum) {
+                       __be32 delta = htonl(oldlen + len);
+
+                       uh->check = ~csum_fold((__force __wsum)
+                                              ((__force u32)uh->check +
+                                               (__force u32)delta));
+                       uh->check = gso_make_checksum(skb, ~uh->check);
+
+                       if (uh->check == 0)
+                               uh->check = CSUM_MANGLED_0;
+               }
+
+               skb->protocol = protocol;
+       } while ((skb = skb->next));
+out:
+       return segs;
+}
+
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
new file mode 100644 (file)
index 0000000..61ec1a6
--- /dev/null
@@ -0,0 +1,100 @@
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/net_namespace.h>
+
+int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+                   struct socket **sockp)
+{
+       int err = -EINVAL;
+       struct socket *sock = NULL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (cfg->family == AF_INET6) {
+               struct sockaddr_in6 udp6_addr;
+
+               err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+               if (err < 0)
+                       goto error;
+
+               sk_change_net(sock->sk, net);
+
+               udp6_addr.sin6_family = AF_INET6;
+               memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
+                      sizeof(udp6_addr.sin6_addr));
+               udp6_addr.sin6_port = cfg->local_udp_port;
+               err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
+                                 sizeof(udp6_addr));
+               if (err < 0)
+                       goto error;
+
+               if (cfg->peer_udp_port) {
+                       udp6_addr.sin6_family = AF_INET6;
+                       memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+                              sizeof(udp6_addr.sin6_addr));
+                       udp6_addr.sin6_port = cfg->peer_udp_port;
+                       err = kernel_connect(sock,
+                                            (struct sockaddr *)&udp6_addr,
+                                            sizeof(udp6_addr), 0);
+               }
+               if (err < 0)
+                       goto error;
+
+               udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
+               udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
+       } else
+#endif
+       if (cfg->family == AF_INET) {
+               struct sockaddr_in udp_addr;
+
+               err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+               if (err < 0)
+                       goto error;
+
+               sk_change_net(sock->sk, net);
+
+               udp_addr.sin_family = AF_INET;
+               udp_addr.sin_addr = cfg->local_ip;
+               udp_addr.sin_port = cfg->local_udp_port;
+               err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
+                                 sizeof(udp_addr));
+               if (err < 0)
+                       goto error;
+
+               if (cfg->peer_udp_port) {
+                       udp_addr.sin_family = AF_INET;
+                       udp_addr.sin_addr = cfg->peer_ip;
+                       udp_addr.sin_port = cfg->peer_udp_port;
+                       err = kernel_connect(sock,
+                                            (struct sockaddr *)&udp_addr,
+                                            sizeof(udp_addr), 0);
+                       if (err < 0)
+                               goto error;
+               }
+
+               sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;
+       } else {
+               return -EPFNOSUPPORT;
+       }
+
+
+       *sockp = sock;
+
+       return 0;
+
+error:
+       if (sock) {
+               kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sock->sk);
+       }
+       *sockp = NULL;
+       return err;
+}
+EXPORT_SYMBOL(udp_sock_create);
+
+MODULE_LICENSE("GPL");
index 5667b3003af9b51779ff322717e999282113c4b7..4c03c28430945930a4aaacc5b55742b7f7488bf9 100644 (file)
@@ -186,6 +186,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .max_desync_factor      = MAX_DESYNC_FACTOR,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
+       .accept_ra_from_local   = 0,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -222,6 +223,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .max_desync_factor      = MAX_DESYNC_FACTOR,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
+       .accept_ra_from_local   = 0,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -2728,9 +2730,25 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
        }
 }
 
+static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
+{
+       if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
+               struct in6_addr addr;
+
+               ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
+               /* addrconf_add_linklocal also adds a prefix_route and we
+                * only need to care about prefix routes if ipv6_generate_eui64
+                * couldn't generate one.
+                */
+               if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
+                       addrconf_add_linklocal(idev, &addr);
+               else if (prefix_route)
+                       addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
+       }
+}
+
 static void addrconf_dev_config(struct net_device *dev)
 {
-       struct in6_addr addr;
        struct inet6_dev *idev;
 
        ASSERT_RTNL();
@@ -2751,11 +2769,7 @@ static void addrconf_dev_config(struct net_device *dev)
        if (IS_ERR(idev))
                return;
 
-       memset(&addr, 0, sizeof(struct in6_addr));
-       addr.s6_addr32[0] = htonl(0xFE800000);
-
-       if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
-               addrconf_add_linklocal(idev, &addr);
+       addrconf_addr_gen(idev, false);
 }
 
 #if IS_ENABLED(CONFIG_IPV6_SIT)
@@ -2777,11 +2791,7 @@ static void addrconf_sit_config(struct net_device *dev)
        }
 
        if (dev->priv_flags & IFF_ISATAP) {
-               struct in6_addr addr;
-
-               ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
-               if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
-                       addrconf_add_linklocal(idev, &addr);
+               addrconf_addr_gen(idev, false);
                return;
        }
 
@@ -2796,7 +2806,6 @@ static void addrconf_sit_config(struct net_device *dev)
 static void addrconf_gre_config(struct net_device *dev)
 {
        struct inet6_dev *idev;
-       struct in6_addr addr;
 
        ASSERT_RTNL();
 
@@ -2805,11 +2814,7 @@ static void addrconf_gre_config(struct net_device *dev)
                return;
        }
 
-       ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
-       if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
-               addrconf_add_linklocal(idev, &addr);
-       else
-               addrconf_prefix_route(&addr, 64, dev, 0, 0);
+       addrconf_addr_gen(idev, true);
 }
 #endif
 
@@ -4321,6 +4326,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
        array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
        array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
+       array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4420,6 +4426,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
        nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
        if (nla == NULL)
                goto nla_put_failure;
+
+       if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
+               goto nla_put_failure;
+
        read_lock_bh(&idev->lock);
        memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
        read_unlock_bh(&idev->lock);
@@ -4524,8 +4534,21 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
        if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0)
                BUG();
 
-       if (tb[IFLA_INET6_TOKEN])
+       if (tb[IFLA_INET6_TOKEN]) {
                err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+               if (err)
+                       return err;
+       }
+
+       if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
+               u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
+
+               if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
+                   mode != IN6_ADDR_GEN_MODE_NONE)
+                       return -EINVAL;
+               idev->addr_gen_mode = mode;
+               err = 0;
+       }
 
        return err;
 }
@@ -5167,6 +5190,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec
                },
+               {
+                       .procname       = "accept_ra_from_local",
+                       .data           = &ipv6_devconf.accept_ra_from_local,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        /* sentinel */
                }
index 7cb4392690dd614b1672ad51cc57bbe36218e34c..2daa3a133e498cdccfe5695ee62db7c72da8ce25 100644 (file)
@@ -197,7 +197,7 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->ipv6only    = net->ipv6.sysctl.bindv6only;
+       sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
        /* Init the ipv4 part of the socket since we can have sockets
         * using v6 API for ipv4.
@@ -294,7 +294,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                /* Binding to v4-mapped address on a v6-only socket
                 * makes no sense
                 */
-               if (np->ipv6only) {
+               if (sk->sk_ipv6only) {
                        err = -EINVAL;
                        goto out;
                }
@@ -371,7 +371,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_type != IPV6_ADDR_ANY) {
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
                if (addr_type != IPV6_ADDR_MAPPED)
-                       np->ipv6only = 1;
+                       sk->sk_ipv6only = 1;
        }
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -765,6 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
+       net->ipv6.sysctl.auto_flowlabels = 0;
        atomic_set(&net->ipv6.rt_genid, 0);
 
        err = ipv6_init_mibs(net);
index c3bf2d2e519ea3ec86c07c43dffaff85fad80b5e..2753319524f1acabb34a0520ea29ee361c1dfe9e 100644 (file)
@@ -199,6 +199,7 @@ ipv4_connected:
                      NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
+       ip6_set_txhash(sk);
 out:
        fl6_sock_release(flowlabel);
        return err;
index 3873181ed85614a28f9857d7d53acf2be9d2b9fb..5f19dfbc4c6a4039ae2a22eb065bed6d46565d83 100644 (file)
@@ -322,7 +322,8 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
        else
                strcpy(name, "ip6gre%d");
 
-       dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ip6gre_tunnel_setup);
        if (!dev)
                return NULL;
 
@@ -723,7 +724,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         *      Push down and install the IP header.
         */
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1174,7 +1176,9 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
        struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
        __be16 *p = (__be16 *)(ipv6h+1);
 
-       ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
+       ip6_flow_hdr(ipv6h, 0,
+                    ip6_make_flowlabel(dev_net(dev), skb,
+                                       t->fl.u.ip6.flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
@@ -1323,7 +1327,8 @@ static int __net_init ip6gre_init_net(struct net *net)
        int err;
 
        ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
-                                          ip6gre_tunnel_setup);
+                                         NET_NAME_UNKNOWN,
+                                         ip6gre_tunnel_setup);
        if (!ign->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
index cb9df0eb40237065e696dd1013180b6c82a9c2dc..759456f0c207c72a544c333a902234c8abd0e598 100644 (file)
@@ -205,7 +205,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
 
-       ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                                    np->autoflowlabel));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -800,8 +801,8 @@ slow_path:
                /*
                 *      Copy a block of the IP datagram.
                 */
-               if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
-                       BUG();
+               BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
+                                    len));
                left -= len;
 
                fh->frag_off = htons(offset);
@@ -1270,7 +1271,7 @@ emsgsize:
        }
 
        /* For UDP, check if TX timestamp is enabled */
-       if (sk->sk_type == SOCK_DGRAM)
+       if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW)
                sock_tx_timestamp(sk, &tx_flags);
 
        /*
@@ -1379,12 +1380,6 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
-                               else {
-                                       /* Only the initial fragment
-                                        * is time stamped.
-                                        */
-                                       tx_flags = 0;
-                               }
                        }
                        if (skb == NULL)
                                goto error;
@@ -1398,8 +1393,9 @@ alloc_new_skb:
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
                                    dst_exthdrlen);
 
-                       if (sk->sk_type == SOCK_DGRAM)
-                               skb_shinfo(skb)->tx_flags = tx_flags;
+                       /* Only the initial fragment is time stamped */
+                       skb_shinfo(skb)->tx_flags = tx_flags;
+                       tx_flags = 0;
 
                        /*
                         *      Find where to start putting bytes
@@ -1569,7 +1565,9 @@ int ip6_push_pending_frames(struct sock *sk)
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
 
-       ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, np->cork.tclass,
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                       np->autoflowlabel));
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index afa082458360216ff33012ee43e93354888e44b9..f9de5a69507252a12cbf1efffbf416721d9c871a 100644 (file)
@@ -315,7 +315,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
        else
                sprintf(name, "ip6tnl%%d");
 
-       dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ip6_tnl_dev_setup);
        if (dev == NULL)
                goto failed;
 
@@ -1046,7 +1047,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1772,7 +1774,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 
        err = -ENOMEM;
        ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
-                                     ip6_tnl_dev_setup);
+                                       NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
 
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
index 9aaa6bb229e485fd657a5ca4bd30b6ebb9e90c5f..17ee4fc32dfed1947fc397a805ce72c604d2727b 100644 (file)
@@ -204,7 +204,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
        else
                sprintf(name, "ip6_vti%%d");
 
-       dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
        if (dev == NULL)
                goto failed;
 
@@ -1020,7 +1020,7 @@ static int __net_init vti6_init_net(struct net *net)
 
        err = -ENOMEM;
        ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
-                                       vti6_dev_setup);
+                                       NET_NAME_UNKNOWN, vti6_dev_setup);
 
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
index 8250474ab7dc0e10b3340bca3e68aaf9377a81f2..f9a3fd320d1df23ca1140e3d39e46b4d14a234be 100644 (file)
@@ -744,7 +744,7 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
        else
                sprintf(name, "pim6reg%u", mrt->id);
 
-       dev = alloc_netdev(0, name, reg_vif_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
        if (dev == NULL)
                return NULL;
 
index edb58aff4ae70ac864f3f2b559815064f4db87f6..0c289982796dfb6e01a761ff4d49616700da3ba8 100644 (file)
@@ -235,7 +235,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                if (optlen < sizeof(int) ||
                    inet_sk(sk)->inet_num)
                        goto e_inval;
-               np->ipv6only = valbool;
+               sk->sk_ipv6only = valbool;
                retv = 0;
                break;
 
@@ -834,6 +834,10 @@ pref_skip_coa:
                np->dontfrag = valbool;
                retv = 0;
                break;
+       case IPV6_AUTOFLOWLABEL:
+               np->autoflowlabel = valbool;
+               retv = 0;
+               break;
        }
 
        release_sock(sk);
@@ -1058,7 +1062,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
        }
 
        case IPV6_V6ONLY:
-               val = np->ipv6only;
+               val = sk->sk_ipv6only;
                break;
 
        case IPV6_RECVPKTINFO:
@@ -1158,7 +1162,6 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                        return -EFAULT;
 
                return 0;
-               break;
        }
 
        case IPV6_TRANSPARENT:
@@ -1273,6 +1276,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                val = np->dontfrag;
                break;
 
+       case IPV6_AUTOFLOWLABEL:
+               val = np->autoflowlabel;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
index ca8d4ea48a5d9fa641bf129a6fc5e3b428799fa4..b7ece278dd492453a2c4f3b42559fc4deef1e429 100644 (file)
@@ -1070,6 +1070,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
                sizeof(struct ra_msg);
 
+       ND_PRINTK(2, info,
+                 "RA: %s, dev: %s\n",
+                 __func__, skb->dev->name);
        if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK(2, warn, "RA: source address is not link-local\n");
                return;
@@ -1102,13 +1105,21 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                return;
        }
 
-       if (!ipv6_accept_ra(in6_dev))
+       if (!ipv6_accept_ra(in6_dev)) {
+               ND_PRINTK(2, info,
+                         "RA: %s, did not accept ra for dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_linkparms;
+       }
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        /* skip link-specific parameters from interior routers */
-       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
+       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
+               ND_PRINTK(2, info,
+                         "RA: %s, nodetype is NODEFAULT, dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_linkparms;
+       }
 #endif
 
        if (in6_dev->if_flags & IF_RS_SENT) {
@@ -1130,11 +1141,24 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                                (ra_msg->icmph.icmp6_addrconf_other ?
                                        IF_RA_OTHERCONF : 0);
 
-       if (!in6_dev->cnf.accept_ra_defrtr)
+       if (!in6_dev->cnf.accept_ra_defrtr) {
+               ND_PRINTK(2, info,
+                         "RA: %s, defrtr is false for dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_defrtr;
+       }
 
-       if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+       /* Do not accept RA with source-addr found on local machine unless
+        * accept_ra_from_local is set to true.
+        */
+       if (!in6_dev->cnf.accept_ra_from_local &&
+           ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
+                         NULL, 0)) {
+               ND_PRINTK(2, info,
+                         "RA from local address detected on dev: %s: default router ignored\n",
+                         skb->dev->name);
                goto skip_defrtr;
+       }
 
        lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
 
@@ -1163,8 +1187,10 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                rt = NULL;
        }
 
+       ND_PRINTK(3, info, "RA: rt: %p  lifetime: %d, for dev: %s\n",
+                 rt, lifetime, skb->dev->name);
        if (rt == NULL && lifetime) {
-               ND_PRINTK(3, dbg, "RA: adding default router\n");
+               ND_PRINTK(3, info, "RA: adding default router\n");
 
                rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
                if (rt == NULL) {
@@ -1260,12 +1286,22 @@ skip_linkparms:
                             NEIGH_UPDATE_F_ISROUTER);
        }
 
-       if (!ipv6_accept_ra(in6_dev))
+       if (!ipv6_accept_ra(in6_dev)) {
+               ND_PRINTK(2, info,
+                         "RA: %s, accept_ra is false for dev: %s\n",
+                         __func__, skb->dev->name);
                goto out;
+       }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-       if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+       if (!in6_dev->cnf.accept_ra_from_local &&
+           ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
+                         NULL, 0)) {
+               ND_PRINTK(2, info,
+                         "RA from local address detected on dev: %s: router info ignored.\n",
+                         skb->dev->name);
                goto skip_routeinfo;
+       }
 
        if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
                struct nd_opt_hdr *p;
@@ -1293,8 +1329,12 @@ skip_routeinfo:
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        /* skip link-specific ndopts from interior routers */
-       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
+       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
+               ND_PRINTK(2, info,
+                         "RA: %s, nodetype is NODEFAULT (interior routes), dev: %s\n",
+                         __func__, skb->dev->name);
                goto out;
+       }
 #endif
 
        if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
index 4bff1f297e39a4affcf82e6b7aca2e6078b4dc50..ac93df16f5af678f8724346343517ef241a415f3 100644 (file)
@@ -55,6 +55,11 @@ config NFT_REJECT_IPV6
        default NFT_REJECT
        tristate
 
+config NF_LOG_IPV6
+       tristate "IPv6 packet logging"
+       depends on NETFILTER_ADVANCED
+       select NF_LOG_COMMON
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
index 70d3dd66f2cdbf1408d328fd06104671446d5bc9..c0b263104ed23170e15b3278408fe95f36c77da9 100644 (file)
@@ -23,6 +23,9 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
+# logging
+obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
index 54bd9790603f253a417971eea50a4157fd0c75ae..8b147440fbdced8dbc23023785596f0565b6ddef 100644 (file)
@@ -94,7 +94,6 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                        break;
                default:
                        return false;
-                       break;
                }
 
                nexthdr = hp->nexthdr;
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
new file mode 100644 (file)
index 0000000..7b17a0b
--- /dev/null
@@ -0,0 +1,417 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+/* One level of recursion won't kill us */
+static void dump_ipv6_packet(struct nf_log_buf *m,
+                            const struct nf_loginfo *info,
+                            const struct sk_buff *skb, unsigned int ip6hoff,
+                            int recurse)
+{
+       u_int8_t currenthdr;
+       int fragment;
+       struct ipv6hdr _ip6h;
+       const struct ipv6hdr *ih;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int logflags;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+       else
+               logflags = NF_LOG_MASK;
+
+       ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
+       if (ih == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+
+       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+       nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
+
+       /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
+       nf_log_buf_add(m, "LEN=%zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
+              ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
+              (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
+              ih->hop_limit,
+              (ntohl(*(__be32 *)ih) & 0x000fffff));
+
+       fragment = 0;
+       ptr = ip6hoff + sizeof(struct ipv6hdr);
+       currenthdr = ih->nexthdr;
+       while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
+               struct ipv6_opt_hdr _hdr;
+               const struct ipv6_opt_hdr *hp;
+
+               hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
+               if (hp == NULL) {
+                       nf_log_buf_add(m, "TRUNCATED");
+                       return;
+               }
+
+               /* Max length: 48 "OPT (...) " */
+               if (logflags & XT_LOG_IPOPT)
+                       nf_log_buf_add(m, "OPT ( ");
+
+               switch (currenthdr) {
+               case IPPROTO_FRAGMENT: {
+                       struct frag_hdr _fhdr;
+                       const struct frag_hdr *fh;
+
+                       nf_log_buf_add(m, "FRAG:");
+                       fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
+                                               &_fhdr);
+                       if (fh == NULL) {
+                               nf_log_buf_add(m, "TRUNCATED ");
+                               return;
+                       }
+
+                       /* Max length: 6 "65535 " */
+                       nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
+
+                       /* Max length: 11 "INCOMPLETE " */
+                       if (fh->frag_off & htons(0x0001))
+                               nf_log_buf_add(m, "INCOMPLETE ");
+
+                       nf_log_buf_add(m, "ID:%08x ",
+                                      ntohl(fh->identification));
+
+                       if (ntohs(fh->frag_off) & 0xFFF8)
+                               fragment = 1;
+
+                       hdrlen = 8;
+
+                       break;
+               }
+               case IPPROTO_DSTOPTS:
+               case IPPROTO_ROUTING:
+               case IPPROTO_HOPOPTS:
+                       if (fragment) {
+                               if (logflags & XT_LOG_IPOPT)
+                                       nf_log_buf_add(m, ")");
+                               return;
+                       }
+                       hdrlen = ipv6_optlen(hp);
+                       break;
+               /* Max Length */
+               case IPPROTO_AH:
+                       if (logflags & XT_LOG_IPOPT) {
+                               struct ip_auth_hdr _ahdr;
+                               const struct ip_auth_hdr *ah;
+
+                               /* Max length: 3 "AH " */
+                               nf_log_buf_add(m, "AH ");
+
+                               if (fragment) {
+                                       nf_log_buf_add(m, ")");
+                                       return;
+                               }
+
+                               ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
+                                                       &_ahdr);
+                               if (ah == NULL) {
+                                       /*
+                                        * Max length: 26 "INCOMPLETE [65535
+                                        *  bytes] )"
+                                        */
+                                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
+                                                      skb->len - ptr);
+                                       return;
+                               }
+
+                               /* Length: 15 "SPI=0xF1234567 " */
+                               nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
+
+                       }
+
+                       hdrlen = (hp->hdrlen+2)<<2;
+                       break;
+               case IPPROTO_ESP:
+                       if (logflags & XT_LOG_IPOPT) {
+                               struct ip_esp_hdr _esph;
+                               const struct ip_esp_hdr *eh;
+
+                               /* Max length: 4 "ESP " */
+                               nf_log_buf_add(m, "ESP ");
+
+                               if (fragment) {
+                                       nf_log_buf_add(m, ")");
+                                       return;
+                               }
+
+                               /*
+                                * Max length: 26 "INCOMPLETE [65535 bytes] )"
+                                */
+                               eh = skb_header_pointer(skb, ptr, sizeof(_esph),
+                                                       &_esph);
+                               if (eh == NULL) {
+                                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
+                                                      skb->len - ptr);
+                                       return;
+                               }
+
+                               /* Length: 16 "SPI=0xF1234567 )" */
+                               nf_log_buf_add(m, "SPI=0x%x )",
+                                              ntohl(eh->spi));
+                       }
+                       return;
+               default:
+                       /* Max length: 20 "Unknown Ext Hdr 255" */
+                       nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr);
+                       return;
+               }
+               if (logflags & XT_LOG_IPOPT)
+                       nf_log_buf_add(m, ") ");
+
+               currenthdr = hp->nexthdr;
+               ptr += hdrlen;
+       }
+
+       switch (currenthdr) {
+       case IPPROTO_TCP:
+               if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment,
+                                          ptr, logflags))
+                       return;
+               break;
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+               if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr))
+                       return;
+               break;
+       case IPPROTO_ICMPV6: {
+               struct icmp6hdr _icmp6h;
+               const struct icmp6hdr *ic;
+
+               /* Max length: 13 "PROTO=ICMPv6 " */
+               nf_log_buf_add(m, "PROTO=ICMPv6 ");
+
+               if (fragment)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
+               if (ic == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - ptr);
+                       return;
+               }
+
+               /* Max length: 18 "TYPE=255 CODE=255 " */
+               nf_log_buf_add(m, "TYPE=%u CODE=%u ",
+                              ic->icmp6_type, ic->icmp6_code);
+
+               switch (ic->icmp6_type) {
+               case ICMPV6_ECHO_REQUEST:
+               case ICMPV6_ECHO_REPLY:
+                       /* Max length: 19 "ID=65535 SEQ=65535 " */
+                       nf_log_buf_add(m, "ID=%u SEQ=%u ",
+                               ntohs(ic->icmp6_identifier),
+                               ntohs(ic->icmp6_sequence));
+                       break;
+               case ICMPV6_MGM_QUERY:
+               case ICMPV6_MGM_REPORT:
+               case ICMPV6_MGM_REDUCTION:
+                       break;
+
+               case ICMPV6_PARAMPROB:
+                       /* Max length: 17 "POINTER=ffffffff " */
+                       nf_log_buf_add(m, "POINTER=%08x ",
+                                      ntohl(ic->icmp6_pointer));
+                       /* Fall through */
+               case ICMPV6_DEST_UNREACH:
+               case ICMPV6_PKT_TOOBIG:
+               case ICMPV6_TIME_EXCEED:
+                       /* Max length: 3+maxlen */
+                       if (recurse) {
+                               nf_log_buf_add(m, "[");
+                               dump_ipv6_packet(m, info, skb,
+                                                ptr + sizeof(_icmp6h), 0);
+                               nf_log_buf_add(m, "] ");
+                       }
+
+                       /* Max length: 10 "MTU=65535 " */
+                       if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) {
+                               nf_log_buf_add(m, "MTU=%u ",
+                                              ntohl(ic->icmp6_mtu));
+                       }
+               }
+               break;
+       }
+       /* Max length: 10 "PROTO=255 " */
+       default:
+               nf_log_buf_add(m, "PROTO=%u ", currenthdr);
+       }
+
+       /* Max length: 15 "UID=4294967295 " */
+       if ((logflags & XT_LOG_UID) && recurse)
+               nf_log_dump_sk_uid_gid(m, skb->sk);
+
+       /* Max length: 16 "MARK=0xFFFFFFFF " */
+       if (recurse && skb->mark)
+               nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
+}
+
+static void dump_ipv6_mac_header(struct nf_log_buf *m,
+                                const struct nf_loginfo *info,
+                                const struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       unsigned int logflags = 0;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+
+       if (!(logflags & XT_LOG_MACDECODE))
+               goto fallback;
+
+       switch (dev->type) {
+       case ARPHRD_ETHER:
+               nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                      ntohs(eth_hdr(skb)->h_proto));
+               return;
+       default:
+               break;
+       }
+
+fallback:
+       nf_log_buf_add(m, "MAC=");
+       if (dev->hard_header_len &&
+           skb->mac_header != skb->network_header) {
+               const unsigned char *p = skb_mac_header(skb);
+               unsigned int len = dev->hard_header_len;
+               unsigned int i;
+
+               if (dev->type == ARPHRD_SIT) {
+                       p -= ETH_HLEN;
+
+                       if (p < skb->head)
+                               p = NULL;
+               }
+
+               if (p != NULL) {
+                       nf_log_buf_add(m, "%02x", *p++);
+                       for (i = 1; i < len; i++)
+                               nf_log_buf_add(m, ":%02x", *p++);
+               }
+               nf_log_buf_add(m, " ");
+
+               if (dev->type == ARPHRD_SIT) {
+                       const struct iphdr *iph =
+                               (struct iphdr *)skb_mac_header(skb);
+                       nf_log_buf_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
+                                      &iph->daddr);
+               }
+       } else {
+               nf_log_buf_add(m, " ");
+       }
+}
+
+static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
+                             unsigned int hooknum, const struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             const struct nf_loginfo *loginfo,
+                             const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
+                                 loginfo, prefix);
+
+       if (in != NULL)
+               dump_ipv6_mac_header(m, loginfo, skb);
+
+       dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_ip6_logger __read_mostly = {
+       .name           = "nf_log_ipv6",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_ip6_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_ipv6_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_ipv6_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_ip6_logger);
+}
+
+static struct pernet_operations nf_log_ipv6_net_ops = {
+       .init = nf_log_ipv6_net_init,
+       .exit = nf_log_ipv6_net_exit,
+};
+
+static int __init nf_log_ipv6_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_ipv6_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
+       return 0;
+}
+
+static void __exit nf_log_ipv6_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_ipv6_net_ops);
+       nf_log_unregister(&nf_ip6_logger);
+}
+
+module_init(nf_log_ipv6_init);
+module_exit(nf_log_ipv6_exit);
+
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter IPv6 packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
index abfe75a2e3167887531e5febe5f621ed03e37733..fc8e49b2ff3ed6b444cd93f7db75f202ef504b64 100644 (file)
@@ -158,6 +158,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
                                         htons(oldlen), htons(datalen), 1);
 }
 
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
                                       struct nf_nat_range *range)
 {
@@ -175,6 +176,7 @@ static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
 
        return 0;
 }
+#endif
 
 static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
        .l3proto                = NFPROTO_IPV6,
@@ -183,7 +185,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
        .manip_pkt              = nf_nat_ipv6_manip_pkt,
        .csum_update            = nf_nat_ipv6_csum_update,
        .csum_recalc            = nf_nat_ipv6_csum_recalc,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_ipv6_nlattr_to_range,
+#endif
 #ifdef CONFIG_XFRM
        .decode_session = nf_nat_ipv6_decode_session,
 #endif
index b2dc60b0c76403d38a6a96c8a1c299977ecb7bad..dee80fb1aa86299e34b2671e93cca036bb7ff3ee 100644 (file)
@@ -588,8 +588,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        }
 
        offset += skb_transport_offset(skb);
-       if (skb_copy_bits(skb, offset, &csum, 2))
-               BUG();
+       BUG_ON(skb_copy_bits(skb, offset, &csum, 2));
 
        /* in case cksum was not initialized */
        if (unlikely(csum))
@@ -601,8 +600,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
                csum = CSUM_MANGLED_0;
 
-       if (skb_store_bits(skb, offset, &csum, 2))
-               BUG();
+       BUG_ON(skb_store_bits(skb, offset, &csum, 2));
 
 send:
        err = ip6_push_pending_frames(sk);
index 4f408176dc64eeb306e25e5bde275581080fa523..2e9ba035fb5f51f33e7a1b13416064e57f8cb855 100644 (file)
@@ -250,7 +250,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        else
                strcpy(name, "sit%d");
 
-       dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ipip6_tunnel_setup);
        if (dev == NULL)
                return NULL;
 
@@ -1729,6 +1730,7 @@ static int __net_init sit_init_net(struct net *net)
        sitn->tunnels[3] = sitn->tunnels_r_l;
 
        sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
+                                          NET_NAME_UNKNOWN,
                                           ipip6_tunnel_setup);
        if (!sitn->fb_tunnel_dev) {
                err = -ENOMEM;
index a822b880689b5fea5adeed30956afd2328a9c8b9..83cea1d39466affce703d7c1b737a386bf3faecf 100644 (file)
@@ -187,7 +187,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
+       req = inet_reqsk_alloc(&tcp6_request_sock_ops);
        if (!req)
                goto out;
 
index 058f3eca2e53efd1fe016cfe8450d3ab0a9c13b1..5bf7b61f8ae8bc9403b9ea8b705f73d09eaccac5 100644 (file)
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "auto_flowlabels",
+               .data           = &init_net.ipv6.sysctl.auto_flowlabels,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {
                .procname       = "fwmark_reflect",
                .data           = &init_net.ipv6.sysctl.fwmark_reflect,
@@ -74,6 +81,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
        ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
        ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
+       ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
index 229239ad96b1645de84bfc5b0ad76311e295f82c..22055b098428df812ca0dfabe3e39420b650d321 100644 (file)
@@ -198,6 +198,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
+       ip6_set_txhash(sk);
+
        /*
         *      TCP over IPv4
         */
@@ -470,13 +472,14 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
-                             struct flowi6 *fl6,
+                             struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flowi6 *fl6 = &fl->u.ip6;
        struct sk_buff *skb;
        int err = -ENOMEM;
 
@@ -503,18 +506,6 @@ done:
        return err;
 }
 
-static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-       struct flowi6 fl6;
-       int res;
-
-       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
-       if (!res) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-       }
-       return res;
-}
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
@@ -718,22 +709,66 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 }
 #endif
 
+static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+                           struct sk_buff *skb)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+       ireq->ir_iif = sk->sk_bound_dev_if;
+
+       /* So that link locals have meaning */
+       if (!sk->sk_bound_dev_if &&
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
+
+       if (!TCP_SKB_CB(skb)->when &&
+           (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+            np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
+            np->rxopt.bits.rxohlim || np->repflow)) {
+               atomic_inc(&skb->users);
+               ireq->pktopts = skb;
+       }
+}
+
+static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+                                         const struct request_sock *req,
+                                         bool *strict)
+{
+       if (strict)
+               *strict = true;
+       return inet6_csk_route_req(sk, &fl->u.ip6, req);
+}
+
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
-       .rtx_syn_ack    =       tcp_v6_rtx_synack,
+       .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+       .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
+                               sizeof(struct ipv6hdr),
+#ifdef CONFIG_TCP_MD5SIG
        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
-};
 #endif
+       .init_req       =       tcp_v6_init_req,
+#ifdef CONFIG_SYN_COOKIES
+       .cookie_init_seq =      cookie_v6_init_sequence,
+#endif
+       .route_req      =       tcp_v6_route_req,
+       .init_seq       =       tcp_v6_init_sequence,
+       .send_synack    =       tcp_v6_send_synack,
+       .queue_hash_add =       inet6_csk_reqsk_queue_hash_add,
+};
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                                 u32 tsval, u32 tsecr, int oif,
@@ -973,153 +1008,17 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
        return sk;
 }
 
-/* FIXME: this is substantially similar to the ipv4 code.
- * Can some kind of merge be done? -- erics
- */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_options_received tmp_opt;
-       struct request_sock *req;
-       struct inet_request_sock *ireq;
-       struct ipv6_pinfo *np = inet6_sk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       __u32 isn = TCP_SKB_CB(skb)->when;
-       struct dst_entry *dst = NULL;
-       struct tcp_fastopen_cookie foc = { .len = -1 };
-       bool want_cookie = false, fastopen;
-       struct flowi6 fl6;
-       int err;
-
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
 
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
-       if ((sysctl_tcp_syncookies == 2 ||
-            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-               want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
-               if (!want_cookie)
-                       goto drop;
-       }
-
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-               goto drop;
-       }
-
-       req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
-       if (req == NULL)
-               goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-       tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-#endif
-
-       tcp_clear_options(&tmp_opt);
-       tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
-       tmp_opt.user_mss = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+       return tcp_conn_request(&tcp6_request_sock_ops,
+                               &tcp_request_sock_ipv6_ops, sk, skb);
 
-       if (want_cookie && !tmp_opt.saw_tstamp)
-               tcp_clear_options(&tmp_opt);
-
-       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-       tcp_openreq_init(req, &tmp_opt, skb);
-
-       ireq = inet_rsk(req);
-       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
-       if (!want_cookie || tmp_opt.tstamp_ok)
-               TCP_ECN_create_request(req, skb, sock_net(sk));
-
-       ireq->ir_iif = sk->sk_bound_dev_if;
-       ireq->ir_mark = inet_request_mark(sk, skb);
-
-       /* So that link locals have meaning */
-       if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq->ir_iif = inet6_iif(skb);
-
-       if (!isn) {
-               if (ipv6_opt_accepted(sk, skb) ||
-                   np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
-                   np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
-                   np->repflow) {
-                       atomic_inc(&skb->users);
-                       ireq->pktopts = skb;
-               }
-
-               if (want_cookie) {
-                       isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-                       req->cookie_ts = tmp_opt.tstamp_ok;
-                       goto have_isn;
-               }
-
-               /* VJ's idea. We save last timestamp seen
-                * from the destination in peer table, when entering
-                * state TIME-WAIT, and check against it before
-                * accepting new connection request.
-                *
-                * If "isn" is not zero, this request hit alive
-                * timewait bucket, so that all the necessary checks
-                * are made in the function processing timewait state.
-                */
-               if (tmp_opt.saw_tstamp &&
-                   tcp_death_row.sysctl_tw_recycle &&
-                   (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
-                       if (!tcp_peer_is_proven(req, dst, true)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                               goto drop_and_release;
-                       }
-               }
-               /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
-                        !tcp_peer_is_proven(req, dst, false)) {
-                       /* Without syncookies last quarter of
-                        * backlog is filled with destinations,
-                        * proven to be alive.
-                        * It means that we continue to communicate
-                        * to destinations, already remembered
-                        * to the moment of synflood.
-                        */
-                       LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-                                      &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
-                       goto drop_and_release;
-               }
-
-               isn = tcp_v6_init_sequence(skb);
-       }
-have_isn:
-
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_release;
-
-       if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
-               goto drop_and_free;
-
-       tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_openreq_init_rwin(req, sk, dst);
-       fastopen = !want_cookie &&
-                  tcp_try_fastopen(sk, skb, req, &foc, dst);
-       err = tcp_v6_send_synack(sk, dst, &fl6, req,
-                                skb_get_queue_mapping(skb), &foc);
-       if (!fastopen) {
-               if (err || want_cookie)
-                       goto drop_and_free;
-
-               tcp_rsk(req)->listener = NULL;
-               inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-       }
-       return 0;
-
-drop_and_release:
-       dst_release(dst);
-drop_and_free:
-       reqsk_free(req);
 drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
@@ -1235,6 +1134,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
+       ip6_set_txhash(newsk);
+
        /* Now IPv6 options...
 
           First: no IPv4 options.
index 7092ff78fd8498e1cf84a20091b92d6c867a64e6..f9d8800bb72fc43d7a83ec30abb7d7d41fd17978 100644 (file)
@@ -79,7 +79,6 @@ static unsigned int udp6_ehashfn(struct net *net,
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
        int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
@@ -95,7 +94,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
                return 1;
 
        if (addr_type == IPV6_ADDR_ANY &&
-           !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+           !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
                return 1;
 
        if (sk2_rcv_saddr6 &&
@@ -703,43 +702,26 @@ drop:
        return -1;
 }
 
-static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
-                                     __be16 loc_port, const struct in6_addr *loc_addr,
-                                     __be16 rmt_port, const struct in6_addr *rmt_addr,
-                                     int dif)
+static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
+                                  __be16 loc_port, const struct in6_addr *loc_addr,
+                                  __be16 rmt_port, const struct in6_addr *rmt_addr,
+                                  int dif, unsigned short hnum)
 {
-       struct hlist_nulls_node *node;
-       unsigned short num = ntohs(loc_port);
-
-       sk_nulls_for_each_from(sk, node) {
-               struct inet_sock *inet = inet_sk(sk);
-
-               if (!net_eq(sock_net(sk), net))
-                       continue;
-
-               if (udp_sk(sk)->udp_port_hash == num &&
-                   sk->sk_family == PF_INET6) {
-                       if (inet->inet_dport) {
-                               if (inet->inet_dport != rmt_port)
-                                       continue;
-                       }
-                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
-                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
-                               continue;
-
-                       if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
-                               continue;
+       struct inet_sock *inet = inet_sk(sk);
 
-                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-                               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
-                                       continue;
-                       }
-                       if (!inet6_mc_check(sk, loc_addr, rmt_addr))
-                               continue;
-                       return sk;
-               }
-       }
-       return NULL;
+       if (!net_eq(sock_net(sk), net))
+               return false;
+
+       if (udp_sk(sk)->udp_port_hash != hnum ||
+           sk->sk_family != PF_INET6 ||
+           (inet->inet_dport && inet->inet_dport != rmt_port) ||
+           (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                   !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+               return false;
+       if (!inet6_mc_check(sk, loc_addr, rmt_addr))
+               return false;
+       return true;
 }
 
 static void flush_stack(struct sock **stack, unsigned int count,
@@ -763,6 +745,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
                if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
+               sock_put(sk);
        }
        if (unlikely(skb1))
                kfree_skb(skb1);
@@ -788,43 +771,51 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 {
        struct sock *sk, *stack[256 / sizeof(struct sock *)];
        const struct udphdr *uh = udp_hdr(skb);
-       struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
-       int dif;
-       unsigned int i, count = 0;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(uh->dest);
+       struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+       int dif = inet6_iif(skb);
+       unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+       unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+
+       if (use_hash2) {
+               hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
+                           udp_table.mask;
+               hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+start_lookup:
+               hslot = &udp_table.hash2[hash2];
+               offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
+       }
 
        spin_lock(&hslot->lock);
-       sk = sk_nulls_head(&hslot->head);
-       dif = inet6_iif(skb);
-       sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-       while (sk) {
-               /* If zero checksum and no_check is not on for
-                * the socket then skip it.
-                */
-               if (uh->check || udp_sk(sk)->no_check6_rx)
+       sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
+               if (__udp_v6_is_mcast_sock(net, sk,
+                                          uh->dest, daddr,
+                                          uh->source, saddr,
+                                          dif, hnum) &&
+                   /* If zero checksum and no_check is not on for
+                    * the socket then skip it.
+                    */
+                   (uh->check || udp_sk(sk)->no_check6_rx)) {
+                       if (unlikely(count == ARRAY_SIZE(stack))) {
+                               flush_stack(stack, count, skb, ~0);
+                               count = 0;
+                       }
                        stack[count++] = sk;
-
-               sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
-                                      uh->source, saddr, dif);
-               if (unlikely(count == ARRAY_SIZE(stack))) {
-                       if (!sk)
-                               break;
-                       flush_stack(stack, count, skb, ~0);
-                       count = 0;
+                       sock_hold(sk);
                }
        }
-       /*
-        * before releasing the lock, we must take reference on sockets
-        */
-       for (i = 0; i < count; i++)
-               sock_hold(stack[i]);
 
        spin_unlock(&hslot->lock);
 
+       /* Also lookup *:port if we are using hash2 and haven't done so yet. */
+       if (use_hash2 && hash2 != hash2_any) {
+               hash2 = hash2_any;
+               goto start_lookup;
+       }
+
        if (count) {
                flush_stack(stack, count, skb, count - 1);
-
-               for (i = 0; i < count; i++)
-                       sock_put(stack[i]);
        } else {
                kfree_skb(skb);
        }
index 54747c25c86c47709f875cf1a7c1f37872fb8fa1..92fafd485deb610cad0e6a03ac41ede8aed29553 100644 (file)
@@ -674,7 +674,6 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
                        self->daddr = DEV_ADDR_ANY;
                        kfree(discoveries);
                        return -EHOSTUNREACH;
-                       break;
                }
        }
        /* Cleanup our copy of the discovery log */
index 365b895da84b5ef3565527675e4103ba05b6dffe..9e0d909390fd0569a50a2e407ee33e2610ae9cae 100644 (file)
@@ -293,7 +293,8 @@ static void irda_device_setup(struct net_device *dev)
  */
 struct net_device *alloc_irdadev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "irda%d", irda_device_setup);
+       return alloc_netdev(sizeof_priv, "irda%d", NET_NAME_UNKNOWN,
+                           irda_device_setup);
 }
 EXPORT_SYMBOL(alloc_irdadev);
 
index 7ac4d1becbfca373330c8618b4f865c304433e95..1bc49edf22966fdce8c8fbf136a3c4e7800fe5b8 100644 (file)
@@ -1024,7 +1024,6 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
        default:
                IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ );
                return 0;
-               break;
        }
 
        /* Insert at end of sk-buffer */
index ffcec225b5d98eb766e2a6396a3e7fe2a65ee729..dc13f1a45f2f7741d98d35d5299e78d4167f2d60 100644 (file)
@@ -96,7 +96,7 @@ static void irlan_eth_setup(struct net_device *dev)
  */
 struct net_device *alloc_irlandev(const char *name)
 {
-       return alloc_netdev(sizeof(struct irlan_cb), name,
+       return alloc_netdev(sizeof(struct irlan_cb), name, NET_NAME_UNKNOWN,
                            irlan_eth_setup);
 }
 
index 98ad6ec4bd3cc40190f199151b9fb2f9348a9d07..a5f28d421ea843824629d49302d12aed36b9fe00 100644 (file)
@@ -1426,7 +1426,8 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
                if (hint[1] & HINT_TELEPHONY) {
                        IRDA_DEBUG(1, "Telephony ");
                        service[i++] = S_TELEPHONY;
-               } if (hint[1] & HINT_FILE_SERVER)
+               }
+               if (hint[1] & HINT_FILE_SERVER)
                        IRDA_DEBUG(1, "File Server ");
 
                if (hint[1] & HINT_COMM) {
index 7a95fa4a3de1e558a07485bd8f6dbb3b4dcf32b3..a089b6b91650b9c17e786e1045cbeac537612dd1 100644 (file)
@@ -1103,7 +1103,6 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                default:
                        err = -EINVAL;
                        goto out;
-                       break;
                }
        }
 
@@ -1543,7 +1542,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 
        sk->sk_shutdown |= how;
        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
-               if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+               if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
+                   iucv->path) {
                        err = pr_iucv->path_quiesce(iucv->path, NULL);
                        if (err)
                                err = -ENOTCONN;
index ba2a2f95911c99732dc2e3fb1ae37b9f977d1f5a..1847ec4e39305f205c68acaf50579bfceff4d91f 100644 (file)
@@ -405,7 +405,6 @@ static int verify_address_len(const void *p)
                 * XXX When it can, remove this -EINVAL.  -DaveM
                 */
                return -EINVAL;
-               break;
        }
 
        return 0;
@@ -536,7 +535,6 @@ pfkey_satype2proto(uint8_t satype)
                return IPPROTO_ESP;
        case SADB_X_SATYPE_IPCOMP:
                return IPPROTO_COMP;
-               break;
        default:
                return 0;
        }
@@ -553,7 +551,6 @@ pfkey_proto2satype(uint16_t proto)
                return SADB_SATYPE_ESP;
        case IPPROTO_COMP:
                return SADB_X_SATYPE_IPCOMP;
-               break;
        default:
                return 0;
        }
index adb9843dd7cfda0199232eb0ecfad77485475a83..378c73b26093b22f39ee53f77d6b09d8b8ddf3c0 100644 (file)
@@ -6,6 +6,7 @@ menuconfig L2TP
        tristate "Layer Two Tunneling Protocol (L2TP)"
        depends on (IPV6 || IPV6=n)
        depends on INET
+       select NET_UDP_TUNNEL
        ---help---
          Layer Two Tunneling Protocol
 
index bea259043205ebe3605c34ed35da504807792af1..1109d3bb8dac8d4142eb6bdd159915b2f978ea45 100644 (file)
@@ -52,6 +52,7 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/udp.h>
+#include <net/udp_tunnel.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 #include <net/protocol.h>
@@ -1358,81 +1359,46 @@ static int l2tp_tunnel_sock_create(struct net *net,
 {
        int err = -EINVAL;
        struct socket *sock = NULL;
-       struct sockaddr_in udp_addr = {0};
-       struct sockaddr_l2tpip ip_addr = {0};
-#if IS_ENABLED(CONFIG_IPV6)
-       struct sockaddr_in6 udp6_addr = {0};
-       struct sockaddr_l2tpip6 ip6_addr = {0};
-#endif
+       struct udp_port_cfg udp_conf;
 
        switch (cfg->encap) {
        case L2TP_ENCAPTYPE_UDP:
+               memset(&udp_conf, 0, sizeof(udp_conf));
+
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
-                       err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
-                       if (err < 0)
-                               goto out;
-
-                       sk_change_net(sock->sk, net);
-
-                       udp6_addr.sin6_family = AF_INET6;
-                       memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
-                              sizeof(udp6_addr.sin6_addr));
-                       udp6_addr.sin6_port = htons(cfg->local_udp_port);
-                       err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
-                                         sizeof(udp6_addr));
-                       if (err < 0)
-                               goto out;
-
-                       udp6_addr.sin6_family = AF_INET6;
-                       memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
-                              sizeof(udp6_addr.sin6_addr));
-                       udp6_addr.sin6_port = htons(cfg->peer_udp_port);
-                       err = kernel_connect(sock,
-                                            (struct sockaddr *) &udp6_addr,
-                                            sizeof(udp6_addr), 0);
-                       if (err < 0)
-                               goto out;
-
-                       if (cfg->udp6_zero_tx_checksums)
-                               udp_set_no_check6_tx(sock->sk, true);
-                       if (cfg->udp6_zero_rx_checksums)
-                               udp_set_no_check6_rx(sock->sk, true);
+                       udp_conf.family = AF_INET6;
+                       memcpy(&udp_conf.local_ip6, cfg->local_ip6,
+                              sizeof(udp_conf.local_ip6));
+                       memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
+                              sizeof(udp_conf.peer_ip6));
+                       udp_conf.use_udp6_tx_checksums =
+                           cfg->udp6_zero_tx_checksums;
+                       udp_conf.use_udp6_rx_checksums =
+                           cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
-                       err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
-                       if (err < 0)
-                               goto out;
-
-                       sk_change_net(sock->sk, net);
-
-                       udp_addr.sin_family = AF_INET;
-                       udp_addr.sin_addr = cfg->local_ip;
-                       udp_addr.sin_port = htons(cfg->local_udp_port);
-                       err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
-                                         sizeof(udp_addr));
-                       if (err < 0)
-                               goto out;
-
-                       udp_addr.sin_family = AF_INET;
-                       udp_addr.sin_addr = cfg->peer_ip;
-                       udp_addr.sin_port = htons(cfg->peer_udp_port);
-                       err = kernel_connect(sock,
-                                            (struct sockaddr *) &udp_addr,
-                                            sizeof(udp_addr), 0);
-                       if (err < 0)
-                               goto out;
+                       udp_conf.family = AF_INET;
+                       udp_conf.local_ip = cfg->local_ip;
+                       udp_conf.peer_ip = cfg->peer_ip;
+                       udp_conf.use_udp_checksums = cfg->use_udp_checksums;
                }
 
-               if (!cfg->use_udp_checksums)
-                       sock->sk->sk_no_check_tx = 1;
+               udp_conf.local_udp_port = htons(cfg->local_udp_port);
+               udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
+
+               err = udp_sock_create(net, &udp_conf, &sock);
+               if (err < 0)
+                       goto out;
 
                break;
 
        case L2TP_ENCAPTYPE_IP:
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
+                       struct sockaddr_l2tpip6 ip6_addr = {0};
+
                        err = sock_create_kern(AF_INET6, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
@@ -1461,6 +1427,8 @@ static int l2tp_tunnel_sock_create(struct net *net,
                } else
 #endif
                {
+                       struct sockaddr_l2tpip ip_addr = {0};
+
                        err = sock_create_kern(AF_INET, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
index 76125c57ee6dddd2396a8a0bef4f19e7720e43ed..edb78e69efe47ad3bd2895ceb1a0d5aaf61cfc38 100644 (file)
@@ -246,7 +246,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
                goto out;
        }
 
-       dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
+       dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                          l2tp_eth_dev_setup);
        if (!dev) {
                rc = -ENOMEM;
                goto out_del_session;
index 97b5dcad50250d22bb5c5ad7ac41c18558d967ce..aeb6a483b3bc881e4b45c6dad7aacc9ae57bde37 100644 (file)
@@ -19,14 +19,6 @@ if MAC80211 != n
 config MAC80211_HAS_RC
        bool
 
-config MAC80211_RC_PID
-       bool "PID controller based rate control algorithm" if EXPERT
-       select MAC80211_HAS_RC
-       ---help---
-         This option enables a TX rate control algorithm for
-         mac80211 that uses a PID controller to select the TX
-         rate.
-
 config MAC80211_RC_MINSTREL
        bool "Minstrel" if EXPERT
        select MAC80211_HAS_RC
@@ -51,14 +43,6 @@ choice
          overridden through the ieee80211_default_rc_algo module
          parameter if different algorithms are available.
 
-config MAC80211_RC_DEFAULT_PID
-       bool "PID controller based rate control algorithm"
-       depends on MAC80211_RC_PID
-       ---help---
-         Select the PID controller based rate control as the
-         default rate control algorithm. You should choose
-         this unless you know what you are doing.
-
 config MAC80211_RC_DEFAULT_MINSTREL
        bool "Minstrel"
        depends on MAC80211_RC_MINSTREL
@@ -72,7 +56,6 @@ config MAC80211_RC_DEFAULT
        string
        default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
        default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
-       default "pid" if MAC80211_RC_DEFAULT_PID
        default ""
 
 endif
index 1e46ffa69167973921b795f8757f903234a61b94..7273d2796dd1a79e4a529bb38a69a7722a7dec3e 100644 (file)
@@ -17,6 +17,7 @@ mac80211-y := \
        aes_ccm.o \
        aes_cmac.o \
        cfg.o \
+       ethtool.o \
        rx.o \
        spectmgmt.o \
        tx.o \
@@ -47,17 +48,12 @@ mac80211-$(CONFIG_PM) += pm.o
 
 CFLAGS_trace.o := -I$(src)
 
-# objects for PID algorithm
-rc80211_pid-y := rc80211_pid_algo.o
-rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
-
 rc80211_minstrel-y := rc80211_minstrel.o
 rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
 
 rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
 rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
 
-mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
 
index ce9633a3cfb0c54abe7aa87746f2b843cf33e65c..d6986f3aa5c469fa15dfbc585d72ec0b5cc53af4 100644 (file)
@@ -170,10 +170,13 @@ ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
 {
        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
 
+       /* we do refcounting here, so don't use the queue reason refcounting */
+
        if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
                ieee80211_stop_queue_by_reason(
                        &sdata->local->hw, queue,
-                       IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+                       IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
+                       false);
        __acquire(agg_queue);
 }
 
@@ -185,7 +188,8 @@ ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
        if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
                ieee80211_wake_queue_by_reason(
                        &sdata->local->hw, queue,
-                       IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+                       IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
+                       false);
        __release(agg_queue);
 }
 
index d7513a503be11b180031342dcf316450fd6c69d3..927b4ea0128bbc365a9692302ba299d5da16b2c4 100644 (file)
@@ -468,327 +468,6 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
                rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
 }
 
-static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
-{
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct ieee80211_local *local = sdata->local;
-       struct rate_control_ref *ref = local->rate_ctrl;
-       struct timespec uptime;
-       u64 packets = 0;
-       u32 thr = 0;
-       int i, ac;
-
-       sinfo->generation = sdata->local->sta_generation;
-
-       sinfo->filled = STATION_INFO_INACTIVE_TIME |
-                       STATION_INFO_RX_BYTES64 |
-                       STATION_INFO_TX_BYTES64 |
-                       STATION_INFO_RX_PACKETS |
-                       STATION_INFO_TX_PACKETS |
-                       STATION_INFO_TX_RETRIES |
-                       STATION_INFO_TX_FAILED |
-                       STATION_INFO_TX_BITRATE |
-                       STATION_INFO_RX_BITRATE |
-                       STATION_INFO_RX_DROP_MISC |
-                       STATION_INFO_BSS_PARAM |
-                       STATION_INFO_CONNECTED_TIME |
-                       STATION_INFO_STA_FLAGS |
-                       STATION_INFO_BEACON_LOSS_COUNT;
-
-       do_posix_clock_monotonic_gettime(&uptime);
-       sinfo->connected_time = uptime.tv_sec - sta->last_connected;
-
-       sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
-       sinfo->tx_bytes = 0;
-       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               sinfo->tx_bytes += sta->tx_bytes[ac];
-               packets += sta->tx_packets[ac];
-       }
-       sinfo->tx_packets = packets;
-       sinfo->rx_bytes = sta->rx_bytes;
-       sinfo->rx_packets = sta->rx_packets;
-       sinfo->tx_retries = sta->tx_retry_count;
-       sinfo->tx_failed = sta->tx_retry_failed;
-       sinfo->rx_dropped_misc = sta->rx_dropped;
-       sinfo->beacon_loss_count = sta->beacon_loss_count;
-
-       if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
-           (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
-               sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
-               if (!local->ops->get_rssi ||
-                   drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
-                       sinfo->signal = (s8)sta->last_signal;
-               sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
-       }
-       if (sta->chains) {
-               sinfo->filled |= STATION_INFO_CHAIN_SIGNAL |
-                                STATION_INFO_CHAIN_SIGNAL_AVG;
-
-               sinfo->chains = sta->chains;
-               for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
-                       sinfo->chain_signal[i] = sta->chain_signal_last[i];
-                       sinfo->chain_signal_avg[i] =
-                               (s8) -ewma_read(&sta->chain_signal_avg[i]);
-               }
-       }
-
-       sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
-       sta_set_rate_info_rx(sta, &sinfo->rxrate);
-
-       if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
-               sinfo->filled |= STATION_INFO_LLID |
-                                STATION_INFO_PLID |
-                                STATION_INFO_PLINK_STATE |
-                                STATION_INFO_LOCAL_PM |
-                                STATION_INFO_PEER_PM |
-                                STATION_INFO_NONPEER_PM;
-
-               sinfo->llid = sta->llid;
-               sinfo->plid = sta->plid;
-               sinfo->plink_state = sta->plink_state;
-               if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-                       sinfo->filled |= STATION_INFO_T_OFFSET;
-                       sinfo->t_offset = sta->t_offset;
-               }
-               sinfo->local_pm = sta->local_pm;
-               sinfo->peer_pm = sta->peer_pm;
-               sinfo->nonpeer_pm = sta->nonpeer_pm;
-#endif
-       }
-
-       sinfo->bss_param.flags = 0;
-       if (sdata->vif.bss_conf.use_cts_prot)
-               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
-       if (sdata->vif.bss_conf.use_short_preamble)
-               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
-       if (sdata->vif.bss_conf.use_short_slot)
-               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
-       sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
-       sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
-
-       sinfo->sta_flags.set = 0;
-       sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
-                               BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
-                               BIT(NL80211_STA_FLAG_WME) |
-                               BIT(NL80211_STA_FLAG_MFP) |
-                               BIT(NL80211_STA_FLAG_AUTHENTICATED) |
-                               BIT(NL80211_STA_FLAG_ASSOCIATED) |
-                               BIT(NL80211_STA_FLAG_TDLS_PEER);
-       if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
-       if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
-       if (test_sta_flag(sta, WLAN_STA_WME))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
-       if (test_sta_flag(sta, WLAN_STA_MFP))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
-       if (test_sta_flag(sta, WLAN_STA_AUTH))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
-       if (test_sta_flag(sta, WLAN_STA_ASSOC))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
-       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
-               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
-
-       /* check if the driver has a SW RC implementation */
-       if (ref && ref->ops->get_expected_throughput)
-               thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
-       else
-               thr = drv_get_expected_throughput(local, &sta->sta);
-
-       if (thr != 0) {
-               sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
-               sinfo->expected_throughput = thr;
-       }
-}
-
-static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
-       "rx_packets", "rx_bytes", "wep_weak_iv_count",
-       "rx_duplicates", "rx_fragments", "rx_dropped",
-       "tx_packets", "tx_bytes", "tx_fragments",
-       "tx_filtered", "tx_retry_failed", "tx_retries",
-       "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
-       "channel", "noise", "ch_time", "ch_time_busy",
-       "ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
-};
-#define STA_STATS_LEN  ARRAY_SIZE(ieee80211_gstrings_sta_stats)
-
-static int ieee80211_get_et_sset_count(struct wiphy *wiphy,
-                                      struct net_device *dev,
-                                      int sset)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       int rv = 0;
-
-       if (sset == ETH_SS_STATS)
-               rv += STA_STATS_LEN;
-
-       rv += drv_get_et_sset_count(sdata, sset);
-
-       if (rv == 0)
-               return -EOPNOTSUPP;
-       return rv;
-}
-
-static void ieee80211_get_et_stats(struct wiphy *wiphy,
-                                  struct net_device *dev,
-                                  struct ethtool_stats *stats,
-                                  u64 *data)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       struct ieee80211_channel *channel;
-       struct sta_info *sta;
-       struct ieee80211_local *local = sdata->local;
-       struct station_info sinfo;
-       struct survey_info survey;
-       int i, q;
-#define STA_STATS_SURVEY_LEN 7
-
-       memset(data, 0, sizeof(u64) * STA_STATS_LEN);
-
-#define ADD_STA_STATS(sta)                             \
-       do {                                            \
-               data[i++] += sta->rx_packets;           \
-               data[i++] += sta->rx_bytes;             \
-               data[i++] += sta->wep_weak_iv_count;    \
-               data[i++] += sta->num_duplicates;       \
-               data[i++] += sta->rx_fragments;         \
-               data[i++] += sta->rx_dropped;           \
-                                                       \
-               data[i++] += sinfo.tx_packets;          \
-               data[i++] += sinfo.tx_bytes;            \
-               data[i++] += sta->tx_fragments;         \
-               data[i++] += sta->tx_filtered_count;    \
-               data[i++] += sta->tx_retry_failed;      \
-               data[i++] += sta->tx_retry_count;       \
-               data[i++] += sta->beacon_loss_count;    \
-       } while (0)
-
-       /* For Managed stations, find the single station based on BSSID
-        * and use that.  For interface types, iterate through all available
-        * stations and add stats for any station that is assigned to this
-        * network device.
-        */
-
-       mutex_lock(&local->sta_mtx);
-
-       if (sdata->vif.type == NL80211_IFTYPE_STATION) {
-               sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
-
-               if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
-                       goto do_survey;
-
-               sinfo.filled = 0;
-               sta_set_sinfo(sta, &sinfo);
-
-               i = 0;
-               ADD_STA_STATS(sta);
-
-               data[i++] = sta->sta_state;
-
-
-               if (sinfo.filled & STATION_INFO_TX_BITRATE)
-                       data[i] = 100000 *
-                               cfg80211_calculate_bitrate(&sinfo.txrate);
-               i++;
-               if (sinfo.filled & STATION_INFO_RX_BITRATE)
-                       data[i] = 100000 *
-                               cfg80211_calculate_bitrate(&sinfo.rxrate);
-               i++;
-
-               if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
-                       data[i] = (u8)sinfo.signal_avg;
-               i++;
-       } else {
-               list_for_each_entry(sta, &local->sta_list, list) {
-                       /* Make sure this station belongs to the proper dev */
-                       if (sta->sdata->dev != dev)
-                               continue;
-
-                       sinfo.filled = 0;
-                       sta_set_sinfo(sta, &sinfo);
-                       i = 0;
-                       ADD_STA_STATS(sta);
-               }
-       }
-
-do_survey:
-       i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
-       /* Get survey stats for current channel */
-       survey.filled = 0;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (chanctx_conf)
-               channel = chanctx_conf->def.chan;
-       else
-               channel = NULL;
-       rcu_read_unlock();
-
-       if (channel) {
-               q = 0;
-               do {
-                       survey.filled = 0;
-                       if (drv_get_survey(local, q, &survey) != 0) {
-                               survey.filled = 0;
-                               break;
-                       }
-                       q++;
-               } while (channel != survey.channel);
-       }
-
-       if (survey.filled)
-               data[i++] = survey.channel->center_freq;
-       else
-               data[i++] = 0;
-       if (survey.filled & SURVEY_INFO_NOISE_DBM)
-               data[i++] = (u8)survey.noise;
-       else
-               data[i++] = -1LL;
-       if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
-               data[i++] = survey.channel_time;
-       else
-               data[i++] = -1LL;
-       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
-               data[i++] = survey.channel_time_busy;
-       else
-               data[i++] = -1LL;
-       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
-               data[i++] = survey.channel_time_ext_busy;
-       else
-               data[i++] = -1LL;
-       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
-               data[i++] = survey.channel_time_rx;
-       else
-               data[i++] = -1LL;
-       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
-               data[i++] = survey.channel_time_tx;
-       else
-               data[i++] = -1LL;
-
-       mutex_unlock(&local->sta_mtx);
-
-       if (WARN_ON(i != STA_STATS_LEN))
-               return;
-
-       drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
-}
-
-static void ieee80211_get_et_strings(struct wiphy *wiphy,
-                                    struct net_device *dev,
-                                    u32 sset, u8 *data)
-{
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       int sz_sta_stats = 0;
-
-       if (sset == ETH_SS_STATS) {
-               sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
-               memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
-       }
-       drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
-}
-
 static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
                                  int idx, u8 *mac, struct station_info *sinfo)
 {
@@ -875,7 +554,8 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 }
 
 static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
-                                   const u8 *resp, size_t resp_len)
+                                   const u8 *resp, size_t resp_len,
+                                   const struct ieee80211_csa_settings *csa)
 {
        struct probe_resp *new, *old;
 
@@ -891,6 +571,11 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
        new->len = resp_len;
        memcpy(new->data, resp, resp_len);
 
+       if (csa)
+               memcpy(new->csa_counter_offsets, csa->counter_offsets_presp,
+                      csa->n_counter_offsets_presp *
+                      sizeof(new->csa_counter_offsets[0]));
+
        rcu_assign_pointer(sdata->u.ap.probe_resp, new);
        if (old)
                kfree_rcu(old, rcu_head);
@@ -899,7 +584,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
 }
 
 static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
-                                  struct cfg80211_beacon_data *params)
+                                  struct cfg80211_beacon_data *params,
+                                  const struct ieee80211_csa_settings *csa)
 {
        struct beacon_data *new, *old;
        int new_head_len, new_tail_len;
@@ -943,6 +629,13 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
        new->head_len = new_head_len;
        new->tail_len = new_tail_len;
 
+       if (csa) {
+               new->csa_current_counter = csa->count;
+               memcpy(new->csa_counter_offsets, csa->counter_offsets_beacon,
+                      csa->n_counter_offsets_beacon *
+                      sizeof(new->csa_counter_offsets[0]));
+       }
+
        /* copy in head */
        if (params->head)
                memcpy(new->head, params->head, new_head_len);
@@ -957,7 +650,7 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
                        memcpy(new->tail, old->tail, new_tail_len);
 
        err = ieee80211_set_probe_resp(sdata, params->probe_resp,
-                                      params->probe_resp_len);
+                                      params->probe_resp_len, csa);
        if (err < 0)
                return err;
        if (err == 0)
@@ -1042,7 +735,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
                sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |=
                                        IEEE80211_P2P_OPPPS_ENABLE_BIT;
 
-       err = ieee80211_assign_beacon(sdata, &params->beacon);
+       err = ieee80211_assign_beacon(sdata, &params->beacon, NULL);
        if (err < 0) {
                ieee80211_vif_release_channel(sdata);
                return err;
@@ -1090,38 +783,13 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
        if (!old)
                return -ENOENT;
 
-       err = ieee80211_assign_beacon(sdata, params);
+       err = ieee80211_assign_beacon(sdata, params, NULL);
        if (err < 0)
                return err;
        ieee80211_bss_info_change_notify(sdata, err);
        return 0;
 }
 
-bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local)
-{
-       struct ieee80211_sub_if_data *sdata;
-
-       lockdep_assert_held(&local->mtx);
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-               if (!ieee80211_sdata_running(sdata))
-                       continue;
-
-               if (!sdata->vif.csa_active)
-                       continue;
-
-               if (!sdata->csa_block_tx)
-                       continue;
-
-               rcu_read_unlock();
-               return true;
-       }
-       rcu_read_unlock();
-
-       return false;
-}
-
 static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1141,10 +809,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        /* abort any running channel switch */
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
+
        mutex_unlock(&local->mtx);
 
        kfree(sdata->u.ap.next_beacon);
@@ -1327,9 +997,12 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                }
        }
 
-       ret = sta_apply_auth_flags(local, sta, mask, set);
-       if (ret)
-               return ret;
+       /* auth flags will be set later for TDLS stations */
+       if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+               ret = sta_apply_auth_flags(local, sta, mask, set);
+               if (ret)
+                       return ret;
+       }
 
        if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
                if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
@@ -1466,6 +1139,13 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 #endif
        }
 
+       /* set the STA state after all sta info from usermode has been set */
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+               ret = sta_apply_auth_flags(local, sta, mask, set);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -3073,7 +2753,8 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
-               err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+               err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon,
+                                             NULL);
                kfree(sdata->u.ap.next_beacon);
                sdata->u.ap.next_beacon = NULL;
 
@@ -3111,17 +2792,35 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
 
        sdata_assert_lock(sdata);
        lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
 
-       sdata->radar_required = sdata->csa_radar_required;
-       err = ieee80211_vif_change_channel(sdata, &changed);
-       if (err < 0)
-               return err;
+       /*
+        * using reservation isn't immediate as it may be deferred until later
+        * with multi-vif. once reservation is complete it will re-schedule the
+        * work with no reserved_chanctx so verify chandef to check if it
+        * completed successfully
+        */
 
-       if (!local->use_chanctx) {
-               local->_oper_chandef = sdata->csa_chandef;
-               ieee80211_hw_config(local, 0);
+       if (sdata->reserved_chanctx) {
+               /*
+                * with multi-vif csa driver may call ieee80211_csa_finish()
+                * many times while waiting for other interfaces to use their
+                * reservations
+                */
+               if (sdata->reserved_ready)
+                       return 0;
+
+               err = ieee80211_vif_use_reserved_context(sdata);
+               if (err)
+                       return err;
+
+               return 0;
        }
 
+       if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
+                                       &sdata->csa_chandef))
+               return -EINVAL;
+
        sdata->vif.csa_active = false;
 
        err = ieee80211_set_after_csa_beacon(sdata, &changed);
@@ -3131,10 +2830,11 @@ static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
        ieee80211_bss_info_change_notify(sdata, changed);
        cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
 
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
 
        return 0;
 }
@@ -3157,6 +2857,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
 
        sdata_lock(sdata);
        mutex_lock(&local->mtx);
+       mutex_lock(&local->chanctx_mtx);
 
        /* AP might have been stopped while waiting for the lock. */
        if (!sdata->vif.csa_active)
@@ -3168,6 +2869,7 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
        ieee80211_csa_finalize(sdata);
 
 unlock:
+       mutex_unlock(&local->chanctx_mtx);
        mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
 }
@@ -3176,6 +2878,7 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
                                    struct cfg80211_csa_settings *params,
                                    u32 *changed)
 {
+       struct ieee80211_csa_settings csa = {};
        int err;
 
        switch (sdata->vif.type) {
@@ -3210,20 +2913,13 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
                     IEEE80211_MAX_CSA_COUNTERS_NUM))
                        return -EINVAL;
 
-               /* make sure we don't have garbage in other counters */
-               memset(sdata->csa_counter_offset_beacon, 0,
-                      sizeof(sdata->csa_counter_offset_beacon));
-               memset(sdata->csa_counter_offset_presp, 0,
-                      sizeof(sdata->csa_counter_offset_presp));
-
-               memcpy(sdata->csa_counter_offset_beacon,
-                      params->counter_offsets_beacon,
-                      params->n_counter_offsets_beacon * sizeof(u16));
-               memcpy(sdata->csa_counter_offset_presp,
-                      params->counter_offsets_presp,
-                      params->n_counter_offsets_presp * sizeof(u16));
+               csa.counter_offsets_beacon = params->counter_offsets_beacon;
+               csa.counter_offsets_presp = params->counter_offsets_presp;
+               csa.n_counter_offsets_beacon = params->n_counter_offsets_beacon;
+               csa.n_counter_offsets_presp = params->n_counter_offsets_presp;
+               csa.count = params->count;
 
-               err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+               err = ieee80211_assign_beacon(sdata, &params->beacon_csa, &csa);
                if (err < 0) {
                        kfree(sdata->u.ap.next_beacon);
                        return err;
@@ -3319,7 +3015,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *chanctx;
-       int err, num_chanctx, changed = 0;
+       int err, changed = 0;
 
        sdata_assert_lock(sdata);
        lockdep_assert_held(&local->mtx);
@@ -3334,46 +3030,50 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                       &sdata->vif.bss_conf.chandef))
                return -EINVAL;
 
+       /* don't allow another channel switch if one is already active. */
+       if (sdata->vif.csa_active)
+               return -EBUSY;
+
        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
                                         lockdep_is_held(&local->chanctx_mtx));
        if (!conf) {
-               mutex_unlock(&local->chanctx_mtx);
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
-       /* don't handle for multi-VIF cases */
        chanctx = container_of(conf, struct ieee80211_chanctx, conf);
-       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
-               mutex_unlock(&local->chanctx_mtx);
-               return -EBUSY;
+       if (!chanctx) {
+               err = -EBUSY;
+               goto out;
        }
-       num_chanctx = 0;
-       list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
-               num_chanctx++;
-       mutex_unlock(&local->chanctx_mtx);
 
-       if (num_chanctx > 1)
-               return -EBUSY;
+       err = ieee80211_vif_reserve_chanctx(sdata, &params->chandef,
+                                           chanctx->mode,
+                                           params->radar_required);
+       if (err)
+               goto out;
 
-       /* don't allow another channel switch if one is already active. */
-       if (sdata->vif.csa_active)
-               return -EBUSY;
+       /* if reservation is invalid then this will fail */
+       err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0);
+       if (err) {
+               ieee80211_vif_unreserve_chanctx(sdata);
+               goto out;
+       }
 
        err = ieee80211_set_csa_beacon(sdata, params, &changed);
-       if (err)
-               return err;
+       if (err) {
+               ieee80211_vif_unreserve_chanctx(sdata);
+               goto out;
+       }
 
-       sdata->csa_radar_required = params->radar_required;
        sdata->csa_chandef = params->chandef;
        sdata->csa_block_tx = params->block_tx;
-       sdata->csa_current_counter = params->count;
        sdata->vif.csa_active = true;
 
        if (sdata->csa_block_tx)
-               ieee80211_stop_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+               ieee80211_stop_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
 
        if (changed) {
                ieee80211_bss_info_change_notify(sdata, changed);
@@ -3383,7 +3083,9 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_csa_finalize(sdata);
        }
 
-       return 0;
+out:
+       mutex_unlock(&local->chanctx_mtx);
+       return err;
 }
 
 int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
@@ -3515,10 +3217,23 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
             sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
            params->n_csa_offsets) {
                int i;
-               u8 c = sdata->csa_current_counter;
+               struct beacon_data *beacon = NULL;
 
-               for (i = 0; i < params->n_csa_offsets; i++)
-                       data[params->csa_offsets[i]] = c;
+               rcu_read_lock();
+
+               if (sdata->vif.type == NL80211_IFTYPE_AP)
+                       beacon = rcu_dereference(sdata->u.ap.beacon);
+               else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+                       beacon = rcu_dereference(sdata->u.ibss.presp);
+               else if (ieee80211_vif_is_mesh(&sdata->vif))
+                       beacon = rcu_dereference(sdata->u.mesh.beacon);
+
+               if (beacon)
+                       for (i = 0; i < params->n_csa_offsets; i++)
+                               data[params->csa_offsets[i]] =
+                                       beacon->csa_current_counter;
+
+               rcu_read_unlock();
        }
 
        IEEE80211_SKB_CB(skb)->flags = flags;
@@ -3598,21 +3313,6 @@ static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
        return drv_get_antenna(local, tx_ant, rx_ant);
 }
 
-static int ieee80211_set_ringparam(struct wiphy *wiphy, u32 tx, u32 rx)
-{
-       struct ieee80211_local *local = wiphy_priv(wiphy);
-
-       return drv_set_ringparam(local, tx, rx);
-}
-
-static void ieee80211_get_ringparam(struct wiphy *wiphy,
-                                   u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
-{
-       struct ieee80211_local *local = wiphy_priv(wiphy);
-
-       drv_get_ringparam(local, tx, tx_max, rx, rx_max);
-}
-
 static int ieee80211_set_rekey_data(struct wiphy *wiphy,
                                    struct net_device *dev,
                                    struct cfg80211_gtk_rekey_data *data)
@@ -3844,8 +3544,6 @@ const struct cfg80211_ops mac80211_config_ops = {
        .mgmt_frame_register = ieee80211_mgmt_frame_register,
        .set_antenna = ieee80211_set_antenna,
        .get_antenna = ieee80211_get_antenna,
-       .set_ringparam = ieee80211_set_ringparam,
-       .get_ringparam = ieee80211_get_ringparam,
        .set_rekey_data = ieee80211_set_rekey_data,
        .tdls_oper = ieee80211_tdls_oper,
        .tdls_mgmt = ieee80211_tdls_mgmt,
@@ -3854,9 +3552,6 @@ const struct cfg80211_ops mac80211_config_ops = {
 #ifdef CONFIG_PM
        .set_wakeup = ieee80211_set_wakeup,
 #endif
-       .get_et_sset_count = ieee80211_get_et_sset_count,
-       .get_et_stats = ieee80211_get_et_stats,
-       .get_et_strings = ieee80211_get_et_strings,
        .get_channel = ieee80211_cfg_get_channel,
        .start_radar_detection = ieee80211_start_radar_detection,
        .channel_switch = ieee80211_channel_switch,
index a310e33972de8881bf4dd71bdff36d55fa966226..c3fd4d275bf42dbd77ebd12f6b7212fcc5f9817c 100644 (file)
@@ -63,6 +63,20 @@ static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local)
        return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local);
 }
 
+static struct ieee80211_chanctx *
+ieee80211_vif_get_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *conf;
+
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf)
+               return NULL;
+
+       return container_of(conf, struct ieee80211_chanctx, conf);
+}
+
 static const struct cfg80211_chan_def *
 ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *ctx,
@@ -160,6 +174,9 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local,
                return NULL;
 
        list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
+                       continue;
+
                if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
                        continue;
 
@@ -347,6 +364,9 @@ ieee80211_find_chanctx(struct ieee80211_local *local,
        list_for_each_entry(ctx, &local->chanctx_list, list) {
                const struct cfg80211_chan_def *compat;
 
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE)
+                       continue;
+
                if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE)
                        continue;
 
@@ -622,6 +642,7 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
+       bool use_reserved_switch = false;
 
        lockdep_assert_held(&local->chanctx_mtx);
 
@@ -632,12 +653,23 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
-       if (sdata->reserved_chanctx)
+       if (sdata->reserved_chanctx) {
+               if (sdata->reserved_chanctx->replace_state ==
+                   IEEE80211_CHANCTX_REPLACES_OTHER &&
+                   ieee80211_chanctx_num_reserved(local,
+                                                  sdata->reserved_chanctx) > 1)
+                       use_reserved_switch = true;
+
                ieee80211_vif_unreserve_chanctx(sdata);
+       }
 
        ieee80211_assign_vif_chanctx(sdata, NULL);
        if (ieee80211_chanctx_refcount(local, ctx) == 0)
                ieee80211_free_chanctx(local, ctx);
+
+       /* Unreserving may ready an in-place reservation. */
+       if (use_reserved_switch)
+               ieee80211_vif_use_reserved_switch(local);
 }
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
@@ -787,70 +819,6 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
-static int __ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
-                                         struct ieee80211_chanctx *ctx,
-                                         u32 *changed)
-{
-       struct ieee80211_local *local = sdata->local;
-       const struct cfg80211_chan_def *chandef = &sdata->csa_chandef;
-       u32 chanctx_changed = 0;
-
-       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-                                    IEEE80211_CHAN_DISABLED))
-               return -EINVAL;
-
-       if (ieee80211_chanctx_refcount(local, ctx) != 1)
-               return -EINVAL;
-
-       if (sdata->vif.bss_conf.chandef.width != chandef->width) {
-               chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
-               *changed |= BSS_CHANGED_BANDWIDTH;
-       }
-
-       sdata->vif.bss_conf.chandef = *chandef;
-       ctx->conf.def = *chandef;
-
-       chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
-       drv_change_chanctx(local, ctx, chanctx_changed);
-
-       ieee80211_recalc_chanctx_chantype(local, ctx);
-       ieee80211_recalc_smps_chanctx(local, ctx);
-       ieee80211_recalc_radar_chanctx(local, ctx);
-       ieee80211_recalc_chanctx_min_def(local, ctx);
-
-       return 0;
-}
-
-int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
-                                u32 *changed)
-{
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx_conf *conf;
-       struct ieee80211_chanctx *ctx;
-       int ret;
-
-       lockdep_assert_held(&local->mtx);
-
-       /* should never be called if not performing a channel switch. */
-       if (WARN_ON(!sdata->vif.csa_active))
-               return -EINVAL;
-
-       mutex_lock(&local->chanctx_mtx);
-       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
-                                        lockdep_is_held(&local->chanctx_mtx));
-       if (!conf) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ctx = container_of(conf, struct ieee80211_chanctx, conf);
-
-       ret = __ieee80211_vif_change_channel(sdata, ctx, changed);
- out:
-       mutex_unlock(&local->chanctx_mtx);
-       return ret;
-}
-
 static void
 __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
                                      bool clear)
@@ -905,8 +873,25 @@ int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata)
        list_del(&sdata->reserved_chanctx_list);
        sdata->reserved_chanctx = NULL;
 
-       if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0)
-               ieee80211_free_chanctx(sdata->local, ctx);
+       if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) {
+               if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) {
+                       if (WARN_ON(!ctx->replace_ctx))
+                               return -EINVAL;
+
+                       WARN_ON(ctx->replace_ctx->replace_state !=
+                               IEEE80211_CHANCTX_WILL_BE_REPLACED);
+                       WARN_ON(ctx->replace_ctx->replace_ctx != ctx);
+
+                       ctx->replace_ctx->replace_ctx = NULL;
+                       ctx->replace_ctx->replace_state =
+                                       IEEE80211_CHANCTX_REPLACE_NONE;
+
+                       list_del_rcu(&ctx->list);
+                       kfree_rcu(ctx, rcu_head);
+               } else {
+                       ieee80211_free_chanctx(sdata->local, ctx);
+               }
+       }
 
        return 0;
 }
@@ -917,40 +902,84 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
                                  bool radar_required)
 {
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx_conf *conf;
-       struct ieee80211_chanctx *new_ctx, *curr_ctx;
-       int ret = 0;
+       struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx;
 
-       mutex_lock(&local->chanctx_mtx);
-
-       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
-                                        lockdep_is_held(&local->chanctx_mtx));
-       if (!conf) {
-               ret = -EINVAL;
-               goto out;
-       }
+       lockdep_assert_held(&local->chanctx_mtx);
 
-       curr_ctx = container_of(conf, struct ieee80211_chanctx, conf);
+       curr_ctx = ieee80211_vif_get_chanctx(sdata);
+       if (curr_ctx && local->use_chanctx && !local->ops->switch_vif_chanctx)
+               return -ENOTSUPP;
 
        new_ctx = ieee80211_find_reservation_chanctx(local, chandef, mode);
        if (!new_ctx) {
-               if (ieee80211_chanctx_refcount(local, curr_ctx) == 1 &&
-                   (local->hw.flags & IEEE80211_HW_CHANGE_RUNNING_CHANCTX)) {
-                       /* if we're the only users of the chanctx and
-                        * the driver supports changing a running
-                        * context, reserve our current context
-                        */
-                       new_ctx = curr_ctx;
-               } else if (ieee80211_can_create_new_chanctx(local)) {
-                       /* create a new context and reserve it */
+               if (ieee80211_can_create_new_chanctx(local)) {
                        new_ctx = ieee80211_new_chanctx(local, chandef, mode);
-                       if (IS_ERR(new_ctx)) {
-                               ret = PTR_ERR(new_ctx);
-                               goto out;
-                       }
+                       if (IS_ERR(new_ctx))
+                               return PTR_ERR(new_ctx);
                } else {
-                       ret = -EBUSY;
-                       goto out;
+                       if (!curr_ctx ||
+                           (curr_ctx->replace_state ==
+                            IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+                           !list_empty(&curr_ctx->reserved_vifs)) {
+                               /*
+                                * Another vif already requested this context
+                                * for a reservation. Find another one hoping
+                                * all vifs assigned to it will also switch
+                                * soon enough.
+                                *
+                                * TODO: This needs a little more work as some
+                                * cases (more than 2 chanctx capable devices)
+                                * may fail which could otherwise succeed
+                                * provided some channel context juggling was
+                                * performed.
+                                *
+                                * Consider ctx1..3, vif1..6, each ctx has 2
+                                * vifs. vif1 and vif2 from ctx1 request new
+                                * different chandefs starting 2 in-place
+                                * reservations with ctx4 and ctx5 replacing
+                                * ctx1 and ctx2 respectively. Next vif5 and
+                                * vif6 from ctx3 reserve ctx4. If vif3 and
+                                * vif4 remain on ctx2 as they are then this
+                                * fails unless `replace_ctx` from ctx5 is
+                                * replaced with ctx3.
+                                */
+                               list_for_each_entry(ctx, &local->chanctx_list,
+                                                   list) {
+                                       if (ctx->replace_state !=
+                                           IEEE80211_CHANCTX_REPLACE_NONE)
+                                               continue;
+
+                                       if (!list_empty(&ctx->reserved_vifs))
+                                               continue;
+
+                                       curr_ctx = ctx;
+                                       break;
+                               }
+                       }
+
+                       /*
+                        * If that's true then all available contexts already
+                        * have reservations and cannot be used.
+                        */
+                       if (!curr_ctx ||
+                           (curr_ctx->replace_state ==
+                            IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+                           !list_empty(&curr_ctx->reserved_vifs))
+                               return -EBUSY;
+
+                       new_ctx = ieee80211_alloc_chanctx(local, chandef, mode);
+                       if (!new_ctx)
+                               return -ENOMEM;
+
+                       new_ctx->replace_ctx = curr_ctx;
+                       new_ctx->replace_state =
+                                       IEEE80211_CHANCTX_REPLACES_OTHER;
+
+                       curr_ctx->replace_ctx = new_ctx;
+                       curr_ctx->replace_state =
+                                       IEEE80211_CHANCTX_WILL_BE_REPLACED;
+
+                       list_add_rcu(&new_ctx->list, &local->chanctx_list);
                }
        }
 
@@ -958,82 +987,601 @@ int ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
        sdata->reserved_chanctx = new_ctx;
        sdata->reserved_chandef = *chandef;
        sdata->reserved_radar_required = radar_required;
-out:
-       mutex_unlock(&local->chanctx_mtx);
-       return ret;
+       sdata->reserved_ready = false;
+
+       return 0;
 }
 
-int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
-                                      u32 *changed)
+static void
+ieee80211_vif_chanctx_reservation_complete(struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx *ctx;
-       struct ieee80211_chanctx *old_ctx;
-       struct ieee80211_chanctx_conf *conf;
-       int ret;
-       u32 tmp_changed = *changed;
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_MESH_POINT:
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &sdata->csa_finalize_work);
+               break;
+       case NL80211_IFTYPE_STATION:
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &sdata->u.mgd.chswitch_work);
+               break;
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_AP_VLAN:
+       case NL80211_IFTYPE_WDS:
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_P2P_DEVICE:
+       case NUM_NL80211_IFTYPES:
+               WARN_ON(1);
+               break;
+       }
+}
 
-       /* TODO: need to recheck if the chandef is usable etc.? */
+static int
+ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_vif_chanctx_switch vif_chsw[1] = {};
+       struct ieee80211_chanctx *old_ctx, *new_ctx;
+       const struct cfg80211_chan_def *chandef;
+       u32 changed = 0;
+       int err;
 
        lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
 
-       mutex_lock(&local->chanctx_mtx);
+       new_ctx = sdata->reserved_chanctx;
+       old_ctx = ieee80211_vif_get_chanctx(sdata);
 
-       ctx = sdata->reserved_chanctx;
-       if (WARN_ON(!ctx)) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (WARN_ON(!sdata->reserved_ready))
+               return -EBUSY;
+
+       if (WARN_ON(!new_ctx))
+               return -EINVAL;
+
+       if (WARN_ON(!old_ctx))
+               return -EINVAL;
+
+       if (WARN_ON(new_ctx->replace_state ==
+                   IEEE80211_CHANCTX_REPLACES_OTHER))
+               return -EINVAL;
+
+       chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+                               &sdata->reserved_chandef);
+       if (WARN_ON(!chandef))
+               return -EINVAL;
+
+       vif_chsw[0].vif = &sdata->vif;
+       vif_chsw[0].old_ctx = &old_ctx->conf;
+       vif_chsw[0].new_ctx = &new_ctx->conf;
+
+       list_del(&sdata->reserved_chanctx_list);
+       sdata->reserved_chanctx = NULL;
+
+       err = drv_switch_vif_chanctx(local, vif_chsw, 1,
+                                    CHANCTX_SWMODE_REASSIGN_VIF);
+       if (err) {
+               if (ieee80211_chanctx_refcount(local, new_ctx) == 0)
+                       ieee80211_free_chanctx(local, new_ctx);
 
-       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
-                                        lockdep_is_held(&local->chanctx_mtx));
-       if (!conf) {
-               ret = -EINVAL;
                goto out;
        }
 
-       old_ctx = container_of(conf, struct ieee80211_chanctx, conf);
+       list_move(&sdata->assigned_chanctx_list, &new_ctx->assigned_vifs);
+       rcu_assign_pointer(sdata->vif.chanctx_conf, &new_ctx->conf);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP)
+               __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+
+       if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
+               ieee80211_free_chanctx(local, old_ctx);
 
        if (sdata->vif.bss_conf.chandef.width != sdata->reserved_chandef.width)
-               tmp_changed |= BSS_CHANGED_BANDWIDTH;
+               changed = BSS_CHANGED_BANDWIDTH;
 
        sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
 
-       /* unref our reservation */
-       sdata->reserved_chanctx = NULL;
-       sdata->radar_required = sdata->reserved_radar_required;
+       if (changed)
+               ieee80211_bss_info_change_notify(sdata, changed);
+
+out:
+       ieee80211_vif_chanctx_reservation_complete(sdata);
+       return err;
+}
+
+static int
+ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx *old_ctx, *new_ctx;
+       const struct cfg80211_chan_def *chandef;
+       int err;
+
+       old_ctx = ieee80211_vif_get_chanctx(sdata);
+       new_ctx = sdata->reserved_chanctx;
+
+       if (WARN_ON(!sdata->reserved_ready))
+               return -EINVAL;
+
+       if (WARN_ON(old_ctx))
+               return -EINVAL;
+
+       if (WARN_ON(!new_ctx))
+               return -EINVAL;
+
+       if (WARN_ON(new_ctx->replace_state ==
+                   IEEE80211_CHANCTX_REPLACES_OTHER))
+               return -EINVAL;
+
+       chandef = ieee80211_chanctx_non_reserved_chandef(local, new_ctx,
+                               &sdata->reserved_chandef);
+       if (WARN_ON(!chandef))
+               return -EINVAL;
+
        list_del(&sdata->reserved_chanctx_list);
+       sdata->reserved_chanctx = NULL;
 
-       if (old_ctx == ctx) {
-               /* This is our own context, just change it */
-               ret = __ieee80211_vif_change_channel(sdata, old_ctx,
-                                                    &tmp_changed);
-               if (ret)
-                       goto out;
-       } else {
-               ret = ieee80211_assign_vif_chanctx(sdata, ctx);
-               if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
-                       ieee80211_free_chanctx(local, old_ctx);
-               if (ret) {
-                       /* if assign fails refcount stays the same */
-                       if (ieee80211_chanctx_refcount(local, ctx) == 0)
-                               ieee80211_free_chanctx(local, ctx);
+       err = ieee80211_assign_vif_chanctx(sdata, new_ctx);
+       if (err) {
+               if (ieee80211_chanctx_refcount(local, new_ctx) == 0)
+                       ieee80211_free_chanctx(local, new_ctx);
+
+               goto out;
+       }
+
+out:
+       ieee80211_vif_chanctx_reservation_complete(sdata);
+       return err;
+}
+
+static bool
+ieee80211_vif_has_in_place_reservation(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_chanctx *old_ctx, *new_ctx;
+
+       lockdep_assert_held(&sdata->local->chanctx_mtx);
+
+       new_ctx = sdata->reserved_chanctx;
+       old_ctx = ieee80211_vif_get_chanctx(sdata);
+
+       if (!old_ctx)
+               return false;
+
+       if (WARN_ON(!new_ctx))
+               return false;
+
+       if (old_ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED)
+               return false;
+
+       if (new_ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+               return false;
+
+       return true;
+}
+
+static int ieee80211_chsw_switch_hwconf(struct ieee80211_local *local,
+                                       struct ieee80211_chanctx *new_ctx)
+{
+       const struct cfg80211_chan_def *chandef;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       chandef = ieee80211_chanctx_reserved_chandef(local, new_ctx, NULL);
+       if (WARN_ON(!chandef))
+               return -EINVAL;
+
+       local->hw.conf.radar_enabled = new_ctx->conf.radar_enabled;
+       local->_oper_chandef = *chandef;
+       ieee80211_hw_config(local, 0);
+
+       return 0;
+}
+
+static int ieee80211_chsw_switch_vifs(struct ieee80211_local *local,
+                                     int n_vifs)
+{
+       struct ieee80211_vif_chanctx_switch *vif_chsw;
+       struct ieee80211_sub_if_data *sdata;
+       struct ieee80211_chanctx *ctx, *old_ctx;
+       int i, err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       vif_chsw = kzalloc(sizeof(vif_chsw[0]) * n_vifs, GFP_KERNEL);
+       if (!vif_chsw)
+               return -ENOMEM;
+
+       i = 0;
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               if (WARN_ON(!ctx->replace_ctx)) {
+                       err = -EINVAL;
                        goto out;
                }
 
-               if (sdata->vif.type == NL80211_IFTYPE_AP)
-                       __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
+               list_for_each_entry(sdata, &ctx->reserved_vifs,
+                                   reserved_chanctx_list) {
+                       if (!ieee80211_vif_has_in_place_reservation(
+                                       sdata))
+                               continue;
+
+                       old_ctx = ieee80211_vif_get_chanctx(sdata);
+                       vif_chsw[i].vif = &sdata->vif;
+                       vif_chsw[i].old_ctx = &old_ctx->conf;
+                       vif_chsw[i].new_ctx = &ctx->conf;
+
+                       i++;
+               }
        }
 
-       *changed = tmp_changed;
+       err = drv_switch_vif_chanctx(local, vif_chsw, n_vifs,
+                                    CHANCTX_SWMODE_SWAP_CONTEXTS);
 
-       ieee80211_recalc_chanctx_chantype(local, ctx);
-       ieee80211_recalc_smps_chanctx(local, ctx);
-       ieee80211_recalc_radar_chanctx(local, ctx);
-       ieee80211_recalc_chanctx_min_def(local, ctx);
 out:
-       mutex_unlock(&local->chanctx_mtx);
-       return ret;
+       kfree(vif_chsw);
+       return err;
+}
+
+static int ieee80211_chsw_switch_ctxs(struct ieee80211_local *local)
+{
+       struct ieee80211_chanctx *ctx;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               if (!list_empty(&ctx->replace_ctx->assigned_vifs))
+                       continue;
+
+               ieee80211_del_chanctx(local, ctx->replace_ctx);
+               err = ieee80211_add_chanctx(local, ctx);
+               if (err)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       WARN_ON(ieee80211_add_chanctx(local, ctx));
+       list_for_each_entry_continue_reverse(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               if (!list_empty(&ctx->replace_ctx->assigned_vifs))
+                       continue;
+
+               ieee80211_del_chanctx(local, ctx);
+               WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx));
+       }
+
+       return err;
+}
+
+int
+ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
+{
+       struct ieee80211_sub_if_data *sdata, *sdata_tmp;
+       struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
+       struct ieee80211_chanctx *new_ctx = NULL;
+       int i, err, n_assigned, n_reserved, n_ready;
+       int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       /*
+        * If there are 2 independent pairs of channel contexts performing
+        * cross-switch of their vifs this code will still wait until both are
+        * ready even though it could be possible to switch one before the
+        * other is ready.
+        *
+        * For practical reasons and code simplicity just do a single huge
+        * switch.
+        */
+
+       /*
+        * Verify if the reservation is still feasible.
+        *  - if it's not then disconnect
+        *  - if it is but not all vifs necessary are ready then defer
+        */
+
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               if (WARN_ON(!ctx->replace_ctx)) {
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               if (!local->use_chanctx)
+                       new_ctx = ctx;
+
+               n_ctx++;
+
+               n_assigned = 0;
+               n_reserved = 0;
+               n_ready = 0;
+
+               list_for_each_entry(sdata, &ctx->replace_ctx->assigned_vifs,
+                                   assigned_chanctx_list) {
+                       n_assigned++;
+                       if (sdata->reserved_chanctx) {
+                               n_reserved++;
+                               if (sdata->reserved_ready)
+                                       n_ready++;
+                       }
+               }
+
+               if (n_assigned != n_reserved) {
+                       if (n_ready == n_reserved) {
+                               wiphy_info(local->hw.wiphy,
+                                          "channel context reservation cannot be finalized because some interfaces aren't switching\n");
+                               err = -EBUSY;
+                               goto err;
+                       }
+
+                       return -EAGAIN;
+               }
+
+               ctx->conf.radar_enabled = false;
+               list_for_each_entry(sdata, &ctx->reserved_vifs,
+                                   reserved_chanctx_list) {
+                       if (ieee80211_vif_has_in_place_reservation(sdata) &&
+                           !sdata->reserved_ready)
+                               return -EAGAIN;
+
+                       old_ctx = ieee80211_vif_get_chanctx(sdata);
+                       if (old_ctx) {
+                               if (old_ctx->replace_state ==
+                                   IEEE80211_CHANCTX_WILL_BE_REPLACED)
+                                       n_vifs_switch++;
+                               else
+                                       n_vifs_assign++;
+                       } else {
+                               n_vifs_ctxless++;
+                       }
+
+                       if (sdata->reserved_radar_required)
+                               ctx->conf.radar_enabled = true;
+               }
+       }
+
+       if (WARN_ON(n_ctx == 0) ||
+           WARN_ON(n_vifs_switch == 0 &&
+                   n_vifs_assign == 0 &&
+                   n_vifs_ctxless == 0) ||
+           WARN_ON(n_ctx > 1 && !local->use_chanctx) ||
+           WARN_ON(!new_ctx && !local->use_chanctx)) {
+               err = -EINVAL;
+               goto err;
+       }
+
+       /*
+        * All necessary vifs are ready. Perform the switch now depending on
+        * reservations and driver capabilities.
+        */
+
+       if (local->use_chanctx) {
+               if (n_vifs_switch > 0) {
+                       err = ieee80211_chsw_switch_vifs(local, n_vifs_switch);
+                       if (err)
+                               goto err;
+               }
+
+               if (n_vifs_assign > 0 || n_vifs_ctxless > 0) {
+                       err = ieee80211_chsw_switch_ctxs(local);
+                       if (err)
+                               goto err;
+               }
+       } else {
+               err = ieee80211_chsw_switch_hwconf(local, new_ctx);
+               if (err)
+                       goto err;
+       }
+
+       /*
+        * Update all structures, values and pointers to point to new channel
+        * context(s).
+        */
+
+       i = 0;
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               if (WARN_ON(!ctx->replace_ctx)) {
+                       err = -EINVAL;
+                       goto err;
+               }
+
+               list_for_each_entry(sdata, &ctx->reserved_vifs,
+                                   reserved_chanctx_list) {
+                       u32 changed = 0;
+
+                       if (!ieee80211_vif_has_in_place_reservation(sdata))
+                               continue;
+
+                       rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf);
+
+                       if (sdata->vif.type == NL80211_IFTYPE_AP)
+                               __ieee80211_vif_copy_chanctx_to_vlans(sdata,
+                                                                     false);
+
+                       sdata->radar_required = sdata->reserved_radar_required;
+
+                       if (sdata->vif.bss_conf.chandef.width !=
+                           sdata->reserved_chandef.width)
+                               changed = BSS_CHANGED_BANDWIDTH;
+
+                       sdata->vif.bss_conf.chandef = sdata->reserved_chandef;
+                       if (changed)
+                               ieee80211_bss_info_change_notify(sdata,
+                                                                changed);
+
+                       ieee80211_recalc_txpower(sdata);
+               }
+
+               ieee80211_recalc_chanctx_chantype(local, ctx);
+               ieee80211_recalc_smps_chanctx(local, ctx);
+               ieee80211_recalc_radar_chanctx(local, ctx);
+               ieee80211_recalc_chanctx_min_def(local, ctx);
+
+               list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+                                        reserved_chanctx_list) {
+                       if (ieee80211_vif_get_chanctx(sdata) != ctx)
+                               continue;
+
+                       list_del(&sdata->reserved_chanctx_list);
+                       list_move(&sdata->assigned_chanctx_list,
+                                 &new_ctx->assigned_vifs);
+                       sdata->reserved_chanctx = NULL;
+
+                       ieee80211_vif_chanctx_reservation_complete(sdata);
+               }
+
+               /*
+                * This context might have been a dependency for an already
+                * ready re-assign reservation interface that was deferred. Do
+                * not propagate error to the caller though. The in-place
+                * reservation for originally requested interface has already
+                * succeeded at this point.
+                */
+               list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+                                        reserved_chanctx_list) {
+                       if (WARN_ON(ieee80211_vif_has_in_place_reservation(
+                                       sdata)))
+                               continue;
+
+                       if (WARN_ON(sdata->reserved_chanctx != ctx))
+                               continue;
+
+                       if (!sdata->reserved_ready)
+                               continue;
+
+                       if (ieee80211_vif_get_chanctx(sdata))
+                               err = ieee80211_vif_use_reserved_reassign(
+                                               sdata);
+                       else
+                               err = ieee80211_vif_use_reserved_assign(sdata);
+
+                       if (err) {
+                               sdata_info(sdata,
+                                          "failed to finalize (re-)assign reservation (err=%d)\n",
+                                          err);
+                               ieee80211_vif_unreserve_chanctx(sdata);
+                               cfg80211_stop_iface(local->hw.wiphy,
+                                                   &sdata->wdev,
+                                                   GFP_KERNEL);
+                       }
+               }
+       }
+
+       /*
+        * Finally free old contexts
+        */
+
+       list_for_each_entry_safe(ctx, ctx_tmp, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED)
+                       continue;
+
+               ctx->replace_ctx->replace_ctx = NULL;
+               ctx->replace_ctx->replace_state =
+                               IEEE80211_CHANCTX_REPLACE_NONE;
+
+               list_del_rcu(&ctx->list);
+               kfree_rcu(ctx, rcu_head);
+       }
+
+       return 0;
+
+err:
+       list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
+                       continue;
+
+               list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs,
+                                        reserved_chanctx_list) {
+                       ieee80211_vif_unreserve_chanctx(sdata);
+                       ieee80211_vif_chanctx_reservation_complete(sdata);
+               }
+       }
+
+       return err;
+}
+
+int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx *new_ctx;
+       struct ieee80211_chanctx *old_ctx;
+       int err;
+
+       lockdep_assert_held(&local->mtx);
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       new_ctx = sdata->reserved_chanctx;
+       old_ctx = ieee80211_vif_get_chanctx(sdata);
+
+       if (WARN_ON(!new_ctx))
+               return -EINVAL;
+
+       if (WARN_ON(new_ctx->replace_state ==
+                   IEEE80211_CHANCTX_WILL_BE_REPLACED))
+               return -EINVAL;
+
+       if (WARN_ON(sdata->reserved_ready))
+               return -EINVAL;
+
+       sdata->reserved_ready = true;
+
+       if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
+               if (old_ctx)
+                       err = ieee80211_vif_use_reserved_reassign(sdata);
+               else
+                       err = ieee80211_vif_use_reserved_assign(sdata);
+
+               if (err)
+                       return err;
+       }
+
+       /*
+        * In-place reservation may need to be finalized now either if:
+        *  a) sdata is taking part in the swapping itself and is the last one
+        *  b) sdata has switched with a re-assign reservation to an existing
+        *     context readying in-place switching of old_ctx
+        *
+        * In case of (b) do not propagate the error up because the requested
+        * sdata already switched successfully. Just spill an extra warning.
+        * The ieee80211_vif_use_reserved_switch() already stops all necessary
+        * interfaces upon failure.
+        */
+       if ((old_ctx &&
+            old_ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) ||
+           new_ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) {
+               err = ieee80211_vif_use_reserved_switch(local);
+               if (err && err != -EAGAIN) {
+                       if (new_ctx->replace_state ==
+                           IEEE80211_CHANCTX_REPLACES_OTHER)
+                               return err;
+
+                       wiphy_info(local->hw.wiphy,
+                                  "depending in-place reservation failed (err=%d)\n",
+                                  err);
+               }
+       }
+
+       return 0;
 }
 
 int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
@@ -1043,6 +1591,7 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *ctx;
+       const struct cfg80211_chan_def *compat;
        int ret;
 
        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
@@ -1069,11 +1618,33 @@ int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
        }
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
-       if (!cfg80211_chandef_compatible(&conf->def, chandef)) {
+
+       compat = cfg80211_chandef_compatible(&conf->def, chandef);
+       if (!compat) {
                ret = -EINVAL;
                goto out;
        }
 
+       switch (ctx->replace_state) {
+       case IEEE80211_CHANCTX_REPLACE_NONE:
+               if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+               break;
+       case IEEE80211_CHANCTX_WILL_BE_REPLACED:
+               /* TODO: Perhaps the bandwith change could be treated as a
+                * reservation itself? */
+               ret = -EBUSY;
+               goto out;
+       case IEEE80211_CHANCTX_REPLACES_OTHER:
+               /* channel context that is going to replace another channel
+                * context doesn't really exist and shouldn't be assigned
+                * anywhere yet */
+               WARN_ON(1);
+               break;
+       }
+
        sdata->vif.bss_conf.chandef = *chandef;
 
        ieee80211_recalc_chanctx_chantype(local, ctx);
index 2ecb4deddb5df0ca74eb9630de15bbc9c789f274..3db96648b45a02c0e0235b724210a15ba32747b7 100644 (file)
@@ -124,7 +124,7 @@ static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
        long connected_time_secs;
        char buf[100];
        int res;
-       do_posix_clock_monotonic_gettime(&uptime);
+       ktime_get_ts(&uptime);
        connected_time_secs = uptime.tv_sec - sta->last_connected;
        time_to_tm(connected_time_secs, 0, &result);
        result.tm_year -= 70;
@@ -587,7 +587,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
        DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
        DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
        DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
-       DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
 
        if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
                debugfs_create_x32("driver_buffered_tids", 0400,
index bd782dcffcc7b81478277bcb2b5545dfef545da8..11423958116a3fc9e37c91684de3aade8fdef763 100644 (file)
@@ -314,7 +314,7 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
 
 static inline int drv_hw_scan(struct ieee80211_local *local,
                              struct ieee80211_sub_if_data *sdata,
-                             struct cfg80211_scan_request *req)
+                             struct ieee80211_scan_request *req)
 {
        int ret;
 
@@ -346,7 +346,7 @@ static inline int
 drv_sched_scan_start(struct ieee80211_local *local,
                     struct ieee80211_sub_if_data *sdata,
                     struct cfg80211_sched_scan_request *req,
-                    struct ieee80211_sched_scan_ies *ies)
+                    struct ieee80211_scan_ies *ies)
 {
        int ret;
 
@@ -970,6 +970,22 @@ static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
        trace_drv_return_void(local);
 }
 
+static inline void
+drv_mgd_protect_tdls_discover(struct ieee80211_local *local,
+                             struct ieee80211_sub_if_data *sdata)
+{
+       might_sleep();
+
+       if (!check_sdata_in_driver(sdata))
+               return;
+       WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+       trace_drv_mgd_protect_tdls_discover(local, sdata);
+       if (local->ops->mgd_protect_tdls_discover)
+               local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif);
+       trace_drv_return_void(local);
+}
+
 static inline int drv_add_chanctx(struct ieee80211_local *local,
                                  struct ieee80211_chanctx *ctx)
 {
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
new file mode 100644 (file)
index 0000000..ebfc809
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * mac80211 ethtool hooks for cfg80211
+ *
+ * Copied from cfg.c - originally
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2014      Intel Corporation (Author: Johannes Berg)
+ *
+ * This file is GPLv2 as found in COPYING.
+ */
+#include <linux/types.h>
+#include <net/cfg80211.h>
+#include "ieee80211_i.h"
+#include "sta_info.h"
+#include "driver-ops.h"
+
+static int ieee80211_set_ringparam(struct net_device *dev,
+                                  struct ethtool_ringparam *rp)
+{
+       struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
+
+       if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
+               return -EINVAL;
+
+       return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
+}
+
+static void ieee80211_get_ringparam(struct net_device *dev,
+                                   struct ethtool_ringparam *rp)
+{
+       struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
+
+       memset(rp, 0, sizeof(*rp));
+
+       drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
+                         &rp->rx_pending, &rp->rx_max_pending);
+}
+
+static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
+       "rx_packets", "rx_bytes",
+       "rx_duplicates", "rx_fragments", "rx_dropped",
+       "tx_packets", "tx_bytes", "tx_fragments",
+       "tx_filtered", "tx_retry_failed", "tx_retries",
+       "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+       "channel", "noise", "ch_time", "ch_time_busy",
+       "ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
+};
+#define STA_STATS_LEN  ARRAY_SIZE(ieee80211_gstrings_sta_stats)
+
+static int ieee80211_get_sset_count(struct net_device *dev, int sset)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       int rv = 0;
+
+       if (sset == ETH_SS_STATS)
+               rv += STA_STATS_LEN;
+
+       rv += drv_get_et_sset_count(sdata, sset);
+
+       if (rv == 0)
+               return -EOPNOTSUPP;
+       return rv;
+}
+
+static void ieee80211_get_stats(struct net_device *dev,
+                               struct ethtool_stats *stats,
+                               u64 *data)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_channel *channel;
+       struct sta_info *sta;
+       struct ieee80211_local *local = sdata->local;
+       struct station_info sinfo;
+       struct survey_info survey;
+       int i, q;
+#define STA_STATS_SURVEY_LEN 7
+
+       memset(data, 0, sizeof(u64) * STA_STATS_LEN);
+
+#define ADD_STA_STATS(sta)                             \
+       do {                                            \
+               data[i++] += sta->rx_packets;           \
+               data[i++] += sta->rx_bytes;             \
+               data[i++] += sta->num_duplicates;       \
+               data[i++] += sta->rx_fragments;         \
+               data[i++] += sta->rx_dropped;           \
+                                                       \
+               data[i++] += sinfo.tx_packets;          \
+               data[i++] += sinfo.tx_bytes;            \
+               data[i++] += sta->tx_fragments;         \
+               data[i++] += sta->tx_filtered_count;    \
+               data[i++] += sta->tx_retry_failed;      \
+               data[i++] += sta->tx_retry_count;       \
+               data[i++] += sta->beacon_loss_count;    \
+       } while (0)
+
+       /* For Managed stations, find the single station based on BSSID
+        * and use that.  For interface types, iterate through all available
+        * stations and add stats for any station that is assigned to this
+        * network device.
+        */
+
+       mutex_lock(&local->sta_mtx);
+
+       if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+               sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
+
+               if (!(sta && !WARN_ON(sta->sdata->dev != dev)))
+                       goto do_survey;
+
+               sinfo.filled = 0;
+               sta_set_sinfo(sta, &sinfo);
+
+               i = 0;
+               ADD_STA_STATS(sta);
+
+               data[i++] = sta->sta_state;
+
+
+               if (sinfo.filled & STATION_INFO_TX_BITRATE)
+                       data[i] = 100000 *
+                               cfg80211_calculate_bitrate(&sinfo.txrate);
+               i++;
+               if (sinfo.filled & STATION_INFO_RX_BITRATE)
+                       data[i] = 100000 *
+                               cfg80211_calculate_bitrate(&sinfo.rxrate);
+               i++;
+
+               if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
+                       data[i] = (u8)sinfo.signal_avg;
+               i++;
+       } else {
+               list_for_each_entry(sta, &local->sta_list, list) {
+                       /* Make sure this station belongs to the proper dev */
+                       if (sta->sdata->dev != dev)
+                               continue;
+
+                       sinfo.filled = 0;
+                       sta_set_sinfo(sta, &sinfo);
+                       i = 0;
+                       ADD_STA_STATS(sta);
+               }
+       }
+
+do_survey:
+       i = STA_STATS_LEN - STA_STATS_SURVEY_LEN;
+       /* Get survey stats for current channel */
+       survey.filled = 0;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (chanctx_conf)
+               channel = chanctx_conf->def.chan;
+       else
+               channel = NULL;
+       rcu_read_unlock();
+
+       if (channel) {
+               q = 0;
+               do {
+                       survey.filled = 0;
+                       if (drv_get_survey(local, q, &survey) != 0) {
+                               survey.filled = 0;
+                               break;
+                       }
+                       q++;
+               } while (channel != survey.channel);
+       }
+
+       if (survey.filled)
+               data[i++] = survey.channel->center_freq;
+       else
+               data[i++] = 0;
+       if (survey.filled & SURVEY_INFO_NOISE_DBM)
+               data[i++] = (u8)survey.noise;
+       else
+               data[i++] = -1LL;
+       if (survey.filled & SURVEY_INFO_CHANNEL_TIME)
+               data[i++] = survey.channel_time;
+       else
+               data[i++] = -1LL;
+       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
+               data[i++] = survey.channel_time_busy;
+       else
+               data[i++] = -1LL;
+       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
+               data[i++] = survey.channel_time_ext_busy;
+       else
+               data[i++] = -1LL;
+       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_RX)
+               data[i++] = survey.channel_time_rx;
+       else
+               data[i++] = -1LL;
+       if (survey.filled & SURVEY_INFO_CHANNEL_TIME_TX)
+               data[i++] = survey.channel_time_tx;
+       else
+               data[i++] = -1LL;
+
+       mutex_unlock(&local->sta_mtx);
+
+       if (WARN_ON(i != STA_STATS_LEN))
+               return;
+
+       drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
+}
+
+static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       int sz_sta_stats = 0;
+
+       if (sset == ETH_SS_STATS) {
+               sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
+               memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
+       }
+       drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
+}
+
+static int ieee80211_get_regs_len(struct net_device *dev)
+{
+       return 0;
+}
+
+static void ieee80211_get_regs(struct net_device *dev,
+                              struct ethtool_regs *regs,
+                              void *data)
+{
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+       regs->version = wdev->wiphy->hw_version;
+       regs->len = 0;
+}
+
+const struct ethtool_ops ieee80211_ethtool_ops = {
+       .get_drvinfo = cfg80211_get_drvinfo,
+       .get_regs_len = ieee80211_get_regs_len,
+       .get_regs = ieee80211_get_regs,
+       .get_link = ethtool_op_get_link,
+       .get_ringparam = ieee80211_get_ringparam,
+       .set_ringparam = ieee80211_set_ringparam,
+       .get_strings = ieee80211_get_strings,
+       .get_ethtool_stats = ieee80211_get_stats,
+       .get_sset_count = ieee80211_get_sset_count,
+};
index 18ee0a256b1e300d5fc4805d110af12ca1a77308..713485f9effc01000d33369b056236307822f838 100644 (file)
@@ -143,7 +143,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                *pos++ = csa_settings->block_tx ? 1 : 0;
                *pos++ = ieee80211_frequency_to_channel(
                                csa_settings->chandef.chan->center_freq);
-               sdata->csa_counter_offset_beacon[0] = (pos - presp->head);
+               presp->csa_counter_offsets[0] = (pos - presp->head);
                *pos++ = csa_settings->count;
        }
 
index ac9836e0aab335ddf75295b5ad020d77e0e3dd0a..9e025e1184cc3b95fb9adfcdc768f0cecf5e35ee 100644 (file)
@@ -229,16 +229,29 @@ struct ieee80211_rx_data {
        u16 tkip_iv16;
 };
 
+struct ieee80211_csa_settings {
+       const u16 *counter_offsets_beacon;
+       const u16 *counter_offsets_presp;
+
+       int n_counter_offsets_beacon;
+       int n_counter_offsets_presp;
+
+       u8 count;
+};
+
 struct beacon_data {
        u8 *head, *tail;
        int head_len, tail_len;
        struct ieee80211_meshconf_ie *meshconf;
+       u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
+       u8 csa_current_counter;
        struct rcu_head rcu_head;
 };
 
 struct probe_resp {
        struct rcu_head rcu_head;
        int len;
+       u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
        u8 data[0];
 };
 
@@ -688,6 +701,24 @@ enum ieee80211_chanctx_mode {
        IEEE80211_CHANCTX_EXCLUSIVE
 };
 
+/**
+ * enum ieee80211_chanctx_replace_state - channel context replacement state
+ *
+ * This is used for channel context in-place reservations that require channel
+ * context switch/swap.
+ *
+ * @IEEE80211_CHANCTX_REPLACE_NONE: no replacement is taking place
+ * @IEEE80211_CHANCTX_WILL_BE_REPLACED: this channel context will be replaced
+ *     by a (not yet registered) channel context pointed by %replace_ctx.
+ * @IEEE80211_CHANCTX_REPLACES_OTHER: this (not yet registered) channel context
+ *     replaces an existing channel context pointed to by %replace_ctx.
+ */
+enum ieee80211_chanctx_replace_state {
+       IEEE80211_CHANCTX_REPLACE_NONE,
+       IEEE80211_CHANCTX_WILL_BE_REPLACED,
+       IEEE80211_CHANCTX_REPLACES_OTHER,
+};
+
 struct ieee80211_chanctx {
        struct list_head list;
        struct rcu_head rcu_head;
@@ -695,6 +726,9 @@ struct ieee80211_chanctx {
        struct list_head assigned_vifs;
        struct list_head reserved_vifs;
 
+       enum ieee80211_chanctx_replace_state replace_state;
+       struct ieee80211_chanctx *replace_ctx;
+
        enum ieee80211_chanctx_mode mode;
        bool driver_present;
 
@@ -754,9 +788,6 @@ struct ieee80211_sub_if_data {
        struct mac80211_qos_map __rcu *qos_map;
 
        struct work_struct csa_finalize_work;
-       u16 csa_counter_offset_beacon[IEEE80211_MAX_CSA_COUNTERS_NUM];
-       u16 csa_counter_offset_presp[IEEE80211_MAX_CSA_COUNTERS_NUM];
-       bool csa_radar_required;
        bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */
        struct cfg80211_chan_def csa_chandef;
 
@@ -767,7 +798,7 @@ struct ieee80211_sub_if_data {
        struct ieee80211_chanctx *reserved_chanctx;
        struct cfg80211_chan_def reserved_chandef;
        bool reserved_radar_required;
-       u8 csa_current_counter;
+       bool reserved_ready;
 
        /* used to reconfigure hardware SM PS */
        struct work_struct recalc_smps;
@@ -784,6 +815,9 @@ struct ieee80211_sub_if_data {
        bool radar_required;
        struct delayed_work dfs_cac_timer_work;
 
+       u8 tdls_peer[ETH_ALEN] __aligned(2);
+       struct delayed_work tdls_peer_del_work;
+
        /*
         * AP this belongs to: self in AP mode and
         * corresponding AP in VLAN mode, NULL for
@@ -912,6 +946,9 @@ enum queue_stop_reason {
        IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
        IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
        IEEE80211_QUEUE_STOP_REASON_FLUSH,
+       IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
+
+       IEEE80211_QUEUE_STOP_REASONS,
 };
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -1008,6 +1045,7 @@ struct ieee80211_local {
        struct workqueue_struct *workqueue;
 
        unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
+       int q_stop_reasons[IEEE80211_MAX_QUEUES][IEEE80211_QUEUE_STOP_REASONS];
        /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
        spinlock_t queue_stop_reason_lock;
 
@@ -1135,7 +1173,8 @@ struct ieee80211_local {
        unsigned long scanning;
        struct cfg80211_ssid scan_ssid;
        struct cfg80211_scan_request *int_scan_req;
-       struct cfg80211_scan_request *scan_req, *hw_scan_req;
+       struct cfg80211_scan_request *scan_req;
+       struct ieee80211_scan_request *hw_scan_req;
        struct cfg80211_chan_def scan_chandef;
        enum ieee80211_band hw_scan_band;
        int scan_channel_idx;
@@ -1476,7 +1515,6 @@ void ieee80211_sw_roc_work(struct work_struct *work);
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
 
 /* channel switch handling */
-bool ieee80211_csa_needs_block_tx(struct ieee80211_local *local);
 void ieee80211_csa_finalize_work(struct work_struct *work);
 int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                             struct cfg80211_csa_settings *params);
@@ -1705,14 +1743,24 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
                                     unsigned long queues,
-                                    enum queue_stop_reason reason);
+                                    enum queue_stop_reason reason,
+                                    bool refcounted);
+void ieee80211_stop_vif_queues(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata,
+                              enum queue_stop_reason reason);
+void ieee80211_wake_vif_queues(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata,
+                              enum queue_stop_reason reason);
 void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
                                     unsigned long queues,
-                                    enum queue_stop_reason reason);
+                                    enum queue_stop_reason reason,
+                                    bool refcounted);
 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
-                                   enum queue_stop_reason reason);
+                                   enum queue_stop_reason reason,
+                                   bool refcounted);
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
-                                   enum queue_stop_reason reason);
+                                   enum queue_stop_reason reason,
+                                   bool refcounted);
 void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
 void ieee80211_add_pending_skb(struct ieee80211_local *local,
                               struct sk_buff *skb);
@@ -1730,8 +1778,10 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
                                    const u8 *bssid, u16 stype, u16 reason,
                                    bool send_frame, u8 *frame_buf);
 int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
-                            size_t buffer_len, const u8 *ie, size_t ie_len,
-                            enum ieee80211_band band, u32 rate_mask,
+                            size_t buffer_len,
+                            struct ieee80211_scan_ies *ie_desc,
+                            const u8 *ie, size_t ie_len,
+                            u8 bands_used, u32 *rate_masks,
                             struct cfg80211_chan_def *chandef);
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
@@ -1791,18 +1841,14 @@ ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
                              enum ieee80211_chanctx_mode mode,
                              bool radar_required);
 int __must_check
-ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
-                                  u32 *changed);
+ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata);
 int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local);
 
 int __must_check
 ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
                               const struct cfg80211_chan_def *chandef,
                               u32 *changed);
-/* NOTE: only use ieee80211_vif_change_channel() for channel switch */
-int __must_check
-ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
-                            u32 *changed);
 void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
@@ -1842,11 +1888,14 @@ int ieee80211_max_num_channels(struct ieee80211_local *local);
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, u8 action_code, u8 dialog_token,
                        u16 status_code, u32 peer_capability,
-                       const u8 *extra_ies, size_t extra_ies_len);
+                       bool initiator, const u8 *extra_ies,
+                       size_t extra_ies_len);
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper);
 
 
+extern const struct ethtool_ops ieee80211_ethtool_ops;
+
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
 #else
@@ -1854,3 +1903,4 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 #endif
 
 #endif /* IEEE80211_I_H */
+void ieee80211_tdls_peer_del_work(struct work_struct *wk);
index 388b863e821c6beedbcdb01a602c0d18db701d2f..4edfc7c1524ff9d162e493122a58a2a98bc0ce32 100644 (file)
@@ -841,10 +841,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        sdata_lock(sdata);
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
        mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
 
@@ -1623,9 +1624,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                if (local->hw.queues >= IEEE80211_NUM_ACS)
                        txqs = IEEE80211_NUM_ACS;
 
-               ndev = alloc_netdev_mqs(sizeof(*sdata) +
-                                       local->hw.vif_data_size,
-                                       name, ieee80211_if_setup, txqs, 1);
+               ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
+                                       name, NET_NAME_UNKNOWN,
+                                       ieee80211_if_setup, txqs, 1);
                if (!ndev)
                        return -ENOMEM;
                dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1671,6 +1672,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                          ieee80211_dfs_cac_timer_work);
        INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
                          ieee80211_delayed_tailroom_dec);
+       INIT_DELAYED_WORK(&sdata->tdls_peer_del_work,
+                         ieee80211_tdls_peer_del_work);
 
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                struct ieee80211_supported_band *sband;
@@ -1705,6 +1708,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ndev->features |= local->hw.netdev_features;
 
+               netdev_set_default_ethtool_ops(ndev, &ieee80211_ethtool_ops);
+
                ret = register_netdevice(ndev);
                if (ret) {
                        free_netdev(ndev);
index d17c26d6e369f8db71061f3c73f4f27196ed9e3d..e0ab4320a078528b27e101c17bfd70ad422ac4fb 100644 (file)
@@ -272,7 +272,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
 
        /* use this reason, ieee80211_reconfig will unblock it */
        ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+                                       false);
 
        /*
         * Stop all Rx during the reconfig. We don't want state changes
@@ -1187,18 +1188,12 @@ static int __init ieee80211_init(void)
        if (ret)
                goto err_minstrel;
 
-       ret = rc80211_pid_init();
-       if (ret)
-               goto err_pid;
-
        ret = ieee80211_iface_init();
        if (ret)
                goto err_netdev;
 
        return 0;
  err_netdev:
-       rc80211_pid_exit();
- err_pid:
        rc80211_minstrel_ht_exit();
  err_minstrel:
        rc80211_minstrel_exit();
@@ -1208,7 +1203,6 @@ static int __init ieee80211_init(void)
 
 static void __exit ieee80211_exit(void)
 {
-       rc80211_pid_exit();
        rc80211_minstrel_ht_exit();
        rc80211_minstrel_exit();
 
index 6495a3f0428dae6a93bafea04da26b7390f45599..e9f99c1e3fad5905682a61f978d9897833426fd0 100644 (file)
@@ -679,7 +679,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
                *pos++ = 0x0;
                *pos++ = ieee80211_frequency_to_channel(
                                csa->settings.chandef.chan->center_freq);
-               sdata->csa_counter_offset_beacon[0] = hdr_len + 6;
+               bcn->csa_counter_offsets[0] = hdr_len + 6;
                *pos++ = csa->settings.count;
                *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
                *pos++ = 6;
@@ -1122,7 +1122,7 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
        mgmt_fwd = (struct ieee80211_mgmt *) skb_put(skb, len);
 
        /* offset_ttl is based on whether the secondary channel
-        * offset is available or not. Substract 1 from the mesh TTL
+        * offset is available or not. Subtract 1 from the mesh TTL
         * and disable the initiator flag before forwarding.
         */
        offset_ttl = (len < 42) ? 7 : 10;
index 94758b9c9ed48a5d1ea9921f4f9f0da40e34bd0b..214e63b84e5c0eab2a603f7a47c6084a6e567d16 100644 (file)
@@ -157,7 +157,6 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
        default:
                kfree_skb(skb);
                return -ENOTSUPP;
-               break;
        }
        *pos++ = ie_len;
        *pos++ = flags;
index e8f60aa2e848b16982993f134fc63511186542af..63b874101b2763d5997dc561073e96807596c11a 100644 (file)
@@ -551,11 +551,30 @@ static void mesh_plink_timer(unsigned long data)
                return;
 
        spin_lock_bh(&sta->lock);
-       if (sta->ignore_plink_timer) {
-               sta->ignore_plink_timer = false;
+
+       /* If a timer fires just before a state transition on another CPU,
+        * we may have already extended the timeout and changed state by the
+        * time we've acquired the lock and arrived  here.  In that case,
+        * skip this timer and wait for the new one.
+        */
+       if (time_before(jiffies, sta->plink_timer.expires)) {
+               mpl_dbg(sta->sdata,
+                       "Ignoring timer for %pM in state %s (timer adjusted)",
+                       sta->sta.addr, mplstates[sta->plink_state]);
                spin_unlock_bh(&sta->lock);
                return;
        }
+
+       /* del_timer() and handler may race when entering these states */
+       if (sta->plink_state == NL80211_PLINK_LISTEN ||
+           sta->plink_state == NL80211_PLINK_ESTAB) {
+               mpl_dbg(sta->sdata,
+                       "Ignoring timer for %pM in state %s (timer deleted)",
+                       sta->sta.addr, mplstates[sta->plink_state]);
+               spin_unlock_bh(&sta->lock);
+               return;
+       }
+
        mpl_dbg(sta->sdata,
                "Mesh plink timer for %pM fired on state %s\n",
                sta->sta.addr, mplstates[sta->plink_state]);
@@ -773,9 +792,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                        break;
                case CNF_ACPT:
                        sta->plink_state = NL80211_PLINK_CNF_RCVD;
-                       if (!mod_plink_timer(sta,
-                                            mshcfg->dot11MeshConfirmTimeout))
-                               sta->ignore_plink_timer = true;
+                       mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
                        break;
                default:
                        break;
@@ -834,8 +851,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        case NL80211_PLINK_HOLDING:
                switch (event) {
                case CLS_ACPT:
-                       if (del_timer(&sta->plink_timer))
-                               sta->ignore_plink_timer = 1;
+                       del_timer(&sta->plink_timer);
                        mesh_plink_fsm_restart(sta);
                        break;
                case OPN_ACPT:
index 3345401be1b3c26744cb2ab6e384672a0cab0d6b..931330bbe00c589c33393c6b3869737641c9b307 100644 (file)
@@ -940,51 +940,70 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       u32 changed = 0;
        int ret;
 
        if (!ieee80211_sdata_running(sdata))
                return;
 
        sdata_lock(sdata);
+       mutex_lock(&local->mtx);
+       mutex_lock(&local->chanctx_mtx);
+
        if (!ifmgd->associated)
                goto out;
 
-       mutex_lock(&local->mtx);
-       ret = ieee80211_vif_change_channel(sdata, &changed);
-       mutex_unlock(&local->mtx);
-       if (ret) {
+       if (!sdata->vif.csa_active)
+               goto out;
+
+       /*
+        * using reservation isn't immediate as it may be deferred until later
+        * with multi-vif. once reservation is complete it will re-schedule the
+        * work with no reserved_chanctx so verify chandef to check if it
+        * completed successfully
+        */
+
+       if (sdata->reserved_chanctx) {
+               /*
+                * with multi-vif csa driver may call ieee80211_csa_finish()
+                * many times while waiting for other interfaces to use their
+                * reservations
+                */
+               if (sdata->reserved_ready)
+                       goto out;
+
+               ret = ieee80211_vif_use_reserved_context(sdata);
+               if (ret) {
+                       sdata_info(sdata,
+                                  "failed to use reserved channel context, disconnecting (err=%d)\n",
+                                  ret);
+                       ieee80211_queue_work(&sdata->local->hw,
+                                            &ifmgd->csa_connection_drop_work);
+                       goto out;
+               }
+
+               goto out;
+       }
+
+       if (!cfg80211_chandef_identical(&sdata->vif.bss_conf.chandef,
+                                       &sdata->csa_chandef)) {
                sdata_info(sdata,
-                          "vif channel switch failed, disconnecting\n");
+                          "failed to finalize channel switch, disconnecting\n");
                ieee80211_queue_work(&sdata->local->hw,
                                     &ifmgd->csa_connection_drop_work);
                goto out;
        }
 
-       if (!local->use_chanctx) {
-               local->_oper_chandef = sdata->csa_chandef;
-               /* Call "hw_config" only if doing sw channel switch.
-                * Otherwise update the channel directly
-                */
-               if (!local->ops->channel_switch)
-                       ieee80211_hw_config(local, 0);
-               else
-                       local->hw.conf.chandef = local->_oper_chandef;
-       }
-
        /* XXX: shouldn't really modify cfg80211-owned data! */
        ifmgd->associated->channel = sdata->csa_chandef.chan;
 
-       ieee80211_bss_info_change_notify(sdata, changed);
-
-       mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
+
        /* XXX: wait for a beacon first? */
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
-       mutex_unlock(&local->mtx);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
 
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
 
@@ -992,6 +1011,8 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        ieee80211_sta_reset_conn_monitor(sdata);
 
 out:
+       mutex_unlock(&local->chanctx_mtx);
+       mutex_unlock(&local->mtx);
        sdata_unlock(sdata);
 }
 
@@ -1028,6 +1049,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct cfg80211_bss *cbss = ifmgd->associated;
+       struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *chanctx;
        enum ieee80211_band current_band;
        struct ieee80211_csa_ie csa_ie;
@@ -1071,7 +1093,22 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
        ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
 
+       mutex_lock(&local->mtx);
        mutex_lock(&local->chanctx_mtx);
+       conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       if (!conf) {
+               sdata_info(sdata,
+                          "no channel context assigned to vif?, disconnecting\n");
+               ieee80211_queue_work(&local->hw,
+                                    &ifmgd->csa_connection_drop_work);
+               mutex_unlock(&local->chanctx_mtx);
+               mutex_unlock(&local->mtx);
+               return;
+       }
+
+       chanctx = container_of(conf, struct ieee80211_chanctx, conf);
+
        if (local->use_chanctx) {
                u32 num_chanctx = 0;
                list_for_each_entry(chanctx, &local->chanctx_list, list)
@@ -1084,38 +1121,32 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                        ieee80211_queue_work(&local->hw,
                                             &ifmgd->csa_connection_drop_work);
                        mutex_unlock(&local->chanctx_mtx);
+                       mutex_unlock(&local->mtx);
                        return;
                }
        }
 
-       if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               mutex_unlock(&local->chanctx_mtx);
-               return;
-       }
-       chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
-                              struct ieee80211_chanctx, conf);
-       if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
+       res = ieee80211_vif_reserve_chanctx(sdata, &csa_ie.chandef,
+                                           chanctx->mode, false);
+       if (res) {
                sdata_info(sdata,
-                          "channel switch with multiple interfaces on the same channel, disconnecting\n");
+                          "failed to reserve channel context for channel switch, disconnecting (err=%d)\n",
+                          res);
                ieee80211_queue_work(&local->hw,
                                     &ifmgd->csa_connection_drop_work);
                mutex_unlock(&local->chanctx_mtx);
+               mutex_unlock(&local->mtx);
                return;
        }
        mutex_unlock(&local->chanctx_mtx);
 
-       sdata->csa_chandef = csa_ie.chandef;
-
-       mutex_lock(&local->mtx);
        sdata->vif.csa_active = true;
+       sdata->csa_chandef = csa_ie.chandef;
        sdata->csa_block_tx = csa_ie.mode;
 
        if (sdata->csa_block_tx)
-               ieee80211_stop_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+               ieee80211_stop_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
        mutex_unlock(&local->mtx);
 
        if (local->ops->channel_switch) {
@@ -1385,7 +1416,8 @@ void ieee80211_dynamic_ps_disable_work(struct work_struct *work)
 
        ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_PS);
+                                       IEEE80211_QUEUE_STOP_REASON_PS,
+                                       false);
 }
 
 void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
@@ -1830,10 +1862,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        ieee80211_vif_release_channel(sdata);
 
        sdata->vif.csa_active = false;
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
        mutex_unlock(&local->mtx);
 
        sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
@@ -2079,10 +2112,11 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
-       if (!ieee80211_csa_needs_block_tx(local))
-               ieee80211_wake_queues_by_reason(&local->hw,
-                                       IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_CSA);
+       if (sdata->csa_block_tx) {
+               ieee80211_wake_vif_queues(local, sdata,
+                                         IEEE80211_QUEUE_STOP_REASON_CSA);
+               sdata->csa_block_tx = false;
+       }
        mutex_unlock(&local->mtx);
 
        cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
index 7a17decd27f91af8646da20b9ab75fc3e303e3c4..ff20b2ebdb3044207087930d76400421d5fda213 100644 (file)
@@ -119,7 +119,8 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
         * before sending nullfunc to enable powersave at the AP.
         */
        ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
+                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
+                                       false);
        ieee80211_flush_queues(local, NULL);
 
        mutex_lock(&local->iflist_mtx);
@@ -182,7 +183,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local)
        mutex_unlock(&local->iflist_mtx);
 
        ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL);
+                                       IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
+                                       false);
 }
 
 void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
index d478b880a0afd676dae699d5b5541c150a7e819f..4c5192e0d66c7d5ae99913c2e9f2e27ff1626d71 100644 (file)
@@ -35,7 +35,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
        ieee80211_stop_queues_by_reason(hw,
                                        IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+                                       false);
 
        /* flush out all packets */
        synchronize_net();
@@ -74,7 +75,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                        }
                        ieee80211_wake_queues_by_reason(hw,
                                        IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+                                       false);
                        return err;
                } else if (err > 0) {
                        WARN_ON(err != 1);
index 9aa2a1190a86353a25deca879018b2750bdefad9..18babe30283212c18ddb113f3295d211a310d014 100644 (file)
@@ -143,19 +143,6 @@ void rate_control_deinitialize(struct ieee80211_local *local);
 
 
 /* Rate control algorithms */
-#ifdef CONFIG_MAC80211_RC_PID
-int rc80211_pid_init(void);
-void rc80211_pid_exit(void);
-#else
-static inline int rc80211_pid_init(void)
-{
-       return 0;
-}
-static inline void rc80211_pid_exit(void)
-{
-}
-#endif
-
 #ifdef CONFIG_MAC80211_RC_MINSTREL
 int rc80211_minstrel_init(void);
 void rc80211_minstrel_exit(void);
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
deleted file mode 100644 (file)
index 19111c7..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
- * Copyright 2007, Stefano Brivio <stefano.brivio@polimi.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef RC80211_PID_H
-#define RC80211_PID_H
-
-/* Sampling period for measuring percentage of failed frames in ms. */
-#define RC_PID_INTERVAL                        125
-
-/* Exponential averaging smoothness (used for I part of PID controller) */
-#define RC_PID_SMOOTHING_SHIFT         3
-#define RC_PID_SMOOTHING               (1 << RC_PID_SMOOTHING_SHIFT)
-
-/* Sharpening factor (used for D part of PID controller) */
-#define RC_PID_SHARPENING_FACTOR       0
-#define RC_PID_SHARPENING_DURATION     0
-
-/* Fixed point arithmetic shifting amount. */
-#define RC_PID_ARITH_SHIFT             8
-
-/* Proportional PID component coefficient. */
-#define RC_PID_COEFF_P                 15
-/* Integral PID component coefficient. */
-#define RC_PID_COEFF_I                 9
-/* Derivative PID component coefficient. */
-#define RC_PID_COEFF_D                 15
-
-/* Target failed frames rate for the PID controller. NB: This effectively gives
- * maximum failed frames percentage we're willing to accept. If the wireless
- * link quality is good, the controller will fail to adjust failed frames
- * percentage to the target. This is intentional.
- */
-#define RC_PID_TARGET_PF               14
-
-/* Rate behaviour normalization quantity over time. */
-#define RC_PID_NORM_OFFSET             3
-
-/* Push high rates right after loading. */
-#define RC_PID_FAST_START              0
-
-/* Arithmetic right shift for positive and negative values for ISO C. */
-#define RC_PID_DO_ARITH_RIGHT_SHIFT(x, y) \
-       ((x) < 0 ? -((-(x)) >> (y)) : (x) >> (y))
-
-enum rc_pid_event_type {
-       RC_PID_EVENT_TYPE_TX_STATUS,
-       RC_PID_EVENT_TYPE_RATE_CHANGE,
-       RC_PID_EVENT_TYPE_TX_RATE,
-       RC_PID_EVENT_TYPE_PF_SAMPLE,
-};
-
-union rc_pid_event_data {
-       /* RC_PID_EVENT_TX_STATUS */
-       struct {
-               u32 flags;
-               struct ieee80211_tx_info tx_status;
-       };
-       /* RC_PID_EVENT_TYPE_RATE_CHANGE */
-       /* RC_PID_EVENT_TYPE_TX_RATE */
-       struct {
-               int index;
-               int rate;
-       };
-       /* RC_PID_EVENT_TYPE_PF_SAMPLE */
-       struct {
-               s32 pf_sample;
-               s32 prop_err;
-               s32 int_err;
-               s32 der_err;
-       };
-};
-
-struct rc_pid_event {
-       /* The time when the event occurred */
-       unsigned long timestamp;
-
-       /* Event ID number */
-       unsigned int id;
-
-       /* Type of event */
-       enum rc_pid_event_type type;
-
-       /* type specific data */
-       union rc_pid_event_data data;
-};
-
-/* Size of the event ring buffer. */
-#define RC_PID_EVENT_RING_SIZE 32
-
-struct rc_pid_event_buffer {
-       /* Counter that generates event IDs */
-       unsigned int ev_count;
-
-       /* Ring buffer of events */
-       struct rc_pid_event ring[RC_PID_EVENT_RING_SIZE];
-
-       /* Index to the entry in events_buf to be reused */
-       unsigned int next_entry;
-
-       /* Lock that guards against concurrent access to this buffer struct */
-       spinlock_t lock;
-
-       /* Wait queue for poll/select and blocking I/O */
-       wait_queue_head_t waitqueue;
-};
-
-struct rc_pid_events_file_info {
-       /* The event buffer we read */
-       struct rc_pid_event_buffer *events;
-
-       /* The entry we have should read next */
-       unsigned int next_entry;
-};
-
-/**
- * struct rc_pid_debugfs_entries - tunable parameters
- *
- * Algorithm parameters, tunable via debugfs.
- * @target: target percentage for failed frames
- * @sampling_period: error sampling interval in milliseconds
- * @coeff_p: absolute value of the proportional coefficient
- * @coeff_i: absolute value of the integral coefficient
- * @coeff_d: absolute value of the derivative coefficient
- * @smoothing_shift: absolute value of the integral smoothing factor (i.e.
- *     amount of smoothing introduced by the exponential moving average)
- * @sharpen_factor: absolute value of the derivative sharpening factor (i.e.
- *     amount of emphasis given to the derivative term after low activity
- *     events)
- * @sharpen_duration: duration of the sharpening effect after the detected low
- *     activity event, relative to sampling_period
- * @norm_offset: amount of normalization periodically performed on the learnt
- *     rate behaviour values (lower means we should trust more what we learnt
- *     about behaviour of rates, higher means we should trust more the natural
- *     ordering of rates)
- */
-struct rc_pid_debugfs_entries {
-       struct dentry *target;
-       struct dentry *sampling_period;
-       struct dentry *coeff_p;
-       struct dentry *coeff_i;
-       struct dentry *coeff_d;
-       struct dentry *smoothing_shift;
-       struct dentry *sharpen_factor;
-       struct dentry *sharpen_duration;
-       struct dentry *norm_offset;
-};
-
-void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
-                                     struct ieee80211_tx_info *stat);
-
-void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
-                                              int index, int rate);
-
-void rate_control_pid_event_tx_rate(struct rc_pid_event_buffer *buf,
-                                          int index, int rate);
-
-void rate_control_pid_event_pf_sample(struct rc_pid_event_buffer *buf,
-                                            s32 pf_sample, s32 prop_err,
-                                            s32 int_err, s32 der_err);
-
-void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta,
-                                            struct dentry *dir);
-
-void rate_control_pid_remove_sta_debugfs(void *priv, void *priv_sta);
-
-struct rc_pid_sta_info {
-       unsigned long last_change;
-       unsigned long last_sample;
-
-       u32 tx_num_failed;
-       u32 tx_num_xmit;
-
-       int txrate_idx;
-
-       /* Average failed frames percentage error (i.e. actual vs. target
-        * percentage), scaled by RC_PID_SMOOTHING. This value is computed
-        * using using an exponential weighted average technique:
-        *
-        *           (RC_PID_SMOOTHING - 1) * err_avg_old + err
-        * err_avg = ------------------------------------------
-        *                       RC_PID_SMOOTHING
-        *
-        * where err_avg is the new approximation, err_avg_old the previous one
-        * and err is the error w.r.t. to the current failed frames percentage
-        * sample. Note that the bigger RC_PID_SMOOTHING the more weight is
-        * given to the previous estimate, resulting in smoother behavior (i.e.
-        * corresponding to a longer integration window).
-        *
-        * For computation, we actually don't use the above formula, but this
-        * one:
-        *
-        * err_avg_scaled = err_avg_old_scaled - err_avg_old + err
-        *
-        * where:
-        *      err_avg_scaled = err * RC_PID_SMOOTHING
-        *      err_avg_old_scaled = err_avg_old * RC_PID_SMOOTHING
-        *
-        * This avoids floating point numbers and the per_failed_old value can
-        * easily be obtained by shifting per_failed_old_scaled right by
-        * RC_PID_SMOOTHING_SHIFT.
-        */
-       s32 err_avg_sc;
-
-       /* Last framed failes percentage sample. */
-       u32 last_pf;
-
-       /* Sharpening needed. */
-       u8 sharp_cnt;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       /* Event buffer */
-       struct rc_pid_event_buffer events;
-
-       /* Events debugfs file entry */
-       struct dentry *events_entry;
-#endif
-};
-
-/* Algorithm parameters. We keep them on a per-algorithm approach, so they can
- * be tuned individually for each interface.
- */
-struct rc_pid_rateinfo {
-
-       /* Map sorted rates to rates in ieee80211_hw_mode. */
-       int index;
-
-       /* Map rates in ieee80211_hw_mode to sorted rates. */
-       int rev_index;
-
-       /* Did we do any measurement on this rate? */
-       bool valid;
-
-       /* Comparison with the lowest rate. */
-       int diff;
-};
-
-struct rc_pid_info {
-
-       /* The failed frames percentage target. */
-       unsigned int target;
-
-       /* Rate at which failed frames percentage is sampled in 0.001s. */
-       unsigned int sampling_period;
-
-       /* P, I and D coefficients. */
-       int coeff_p;
-       int coeff_i;
-       int coeff_d;
-
-       /* Exponential averaging shift. */
-       unsigned int smoothing_shift;
-
-       /* Sharpening factor and duration. */
-       unsigned int sharpen_factor;
-       unsigned int sharpen_duration;
-
-       /* Normalization offset. */
-       unsigned int norm_offset;
-
-       /* Rates information. */
-       struct rc_pid_rateinfo *rinfo;
-
-       /* Index of the last used rate. */
-       int oldrate;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       /* Debugfs entries created for the parameters above. */
-       struct rc_pid_debugfs_entries dentries;
-#endif
-};
-
-#endif /* RC80211_PID_H */
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
deleted file mode 100644 (file)
index d0da2a7..0000000
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- * Copyright 2002-2005, Instant802 Networks, Inc.
- * Copyright 2005, Devicescape Software, Inc.
- * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
- * Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-#include "rate.h"
-#include "mesh.h"
-#include "rc80211_pid.h"
-
-
-/* This is an implementation of a TX rate control algorithm that uses a PID
- * controller. Given a target failed frames rate, the controller decides about
- * TX rate changes to meet the target failed frames rate.
- *
- * The controller basically computes the following:
- *
- * adj = CP * err + CI * err_avg + CD * (err - last_err) * (1 + sharpening)
- *
- * where
- *     adj     adjustment value that is used to switch TX rate (see below)
- *     err     current error: target vs. current failed frames percentage
- *     last_err        last error
- *     err_avg average (i.e. poor man's integral) of recent errors
- *     sharpening      non-zero when fast response is needed (i.e. right after
- *                     association or no frames sent for a long time), heading
- *                     to zero over time
- *     CP      Proportional coefficient
- *     CI      Integral coefficient
- *     CD      Derivative coefficient
- *
- * CP, CI, CD are subject to careful tuning.
- *
- * The integral component uses a exponential moving average approach instead of
- * an actual sliding window. The advantage is that we don't need to keep an
- * array of the last N error values and computation is easier.
- *
- * Once we have the adj value, we map it to a rate by means of a learning
- * algorithm. This algorithm keeps the state of the percentual failed frames
- * difference between rates. The behaviour of the lowest available rate is kept
- * as a reference value, and every time we switch between two rates, we compute
- * the difference between the failed frames each rate exhibited. By doing so,
- * we compare behaviours which different rates exhibited in adjacent timeslices,
- * thus the comparison is minimally affected by external conditions. This
- * difference gets propagated to the whole set of measurements, so that the
- * reference is always the same. Periodically, we normalize this set so that
- * recent events weigh the most. By comparing the adj value with this set, we
- * avoid pejorative switches to lower rates and allow for switches to higher
- * rates if they behaved well.
- *
- * Note that for the computations we use a fixed-point representation to avoid
- * floating point arithmetic. Hence, all values are shifted left by
- * RC_PID_ARITH_SHIFT.
- */
-
-
-/* Adjust the rate while ensuring that we won't switch to a lower rate if it
- * exhibited a worse failed frames behaviour and we'll choose the highest rate
- * whose failed frames behaviour is not worse than the one of the original rate
- * target. While at it, check that the new rate is valid. */
-static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband,
-                                        struct ieee80211_sta *sta,
-                                        struct rc_pid_sta_info *spinfo, int adj,
-                                        struct rc_pid_rateinfo *rinfo)
-{
-       int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
-       int cur = spinfo->txrate_idx;
-
-       band = sband->band;
-       n_bitrates = sband->n_bitrates;
-
-       /* Map passed arguments to sorted values. */
-       cur_sorted = rinfo[cur].rev_index;
-       new_sorted = cur_sorted + adj;
-
-       /* Check limits. */
-       if (new_sorted < 0)
-               new_sorted = rinfo[0].rev_index;
-       else if (new_sorted >= n_bitrates)
-               new_sorted = rinfo[n_bitrates - 1].rev_index;
-
-       tmp = new_sorted;
-
-       if (adj < 0) {
-               /* Ensure that the rate decrease isn't disadvantageous. */
-               for (probe = cur_sorted; probe >= new_sorted; probe--)
-                       if (rinfo[probe].diff <= rinfo[cur_sorted].diff &&
-                           rate_supported(sta, band, rinfo[probe].index))
-                               tmp = probe;
-       } else {
-               /* Look for rate increase with zero (or below) cost. */
-               for (probe = new_sorted + 1; probe < n_bitrates; probe++)
-                       if (rinfo[probe].diff <= rinfo[new_sorted].diff &&
-                           rate_supported(sta, band, rinfo[probe].index))
-                               tmp = probe;
-       }
-
-       /* Fit the rate found to the nearest supported rate. */
-       do {
-               if (rate_supported(sta, band, rinfo[tmp].index)) {
-                       spinfo->txrate_idx = rinfo[tmp].index;
-                       break;
-               }
-               if (adj < 0)
-                       tmp--;
-               else
-                       tmp++;
-       } while (tmp < n_bitrates && tmp >= 0);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       rate_control_pid_event_rate_change(&spinfo->events,
-               spinfo->txrate_idx,
-               sband->bitrates[spinfo->txrate_idx].bitrate);
-#endif
-}
-
-/* Normalize the failed frames per-rate differences. */
-static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l)
-{
-       int i, norm_offset = pinfo->norm_offset;
-       struct rc_pid_rateinfo *r = pinfo->rinfo;
-
-       if (r[0].diff > norm_offset)
-               r[0].diff -= norm_offset;
-       else if (r[0].diff < -norm_offset)
-               r[0].diff += norm_offset;
-       for (i = 0; i < l - 1; i++)
-               if (r[i + 1].diff > r[i].diff + norm_offset)
-                       r[i + 1].diff -= norm_offset;
-               else if (r[i + 1].diff <= r[i].diff)
-                       r[i + 1].diff += norm_offset;
-}
-
-static void rate_control_pid_sample(struct rc_pid_info *pinfo,
-                                   struct ieee80211_supported_band *sband,
-                                   struct ieee80211_sta *sta,
-                                   struct rc_pid_sta_info *spinfo)
-{
-       struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
-       u32 pf;
-       s32 err_avg;
-       u32 err_prop;
-       u32 err_int;
-       u32 err_der;
-       int adj, i, j, tmp;
-       unsigned long period;
-
-       /* In case nothing happened during the previous control interval, turn
-        * the sharpening factor on. */
-       period = msecs_to_jiffies(pinfo->sampling_period);
-       if (jiffies - spinfo->last_sample > 2 * period)
-               spinfo->sharp_cnt = pinfo->sharpen_duration;
-
-       spinfo->last_sample = jiffies;
-
-       /* This should never happen, but in case, we assume the old sample is
-        * still a good measurement and copy it. */
-       if (unlikely(spinfo->tx_num_xmit == 0))
-               pf = spinfo->last_pf;
-       else
-               pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
-
-       spinfo->tx_num_xmit = 0;
-       spinfo->tx_num_failed = 0;
-
-       /* If we just switched rate, update the rate behaviour info. */
-       if (pinfo->oldrate != spinfo->txrate_idx) {
-
-               i = rinfo[pinfo->oldrate].rev_index;
-               j = rinfo[spinfo->txrate_idx].rev_index;
-
-               tmp = (pf - spinfo->last_pf);
-               tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
-
-               rinfo[j].diff = rinfo[i].diff + tmp;
-               pinfo->oldrate = spinfo->txrate_idx;
-       }
-       rate_control_pid_normalize(pinfo, sband->n_bitrates);
-
-       /* Compute the proportional, integral and derivative errors. */
-       err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;
-
-       err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
-       spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
-       err_int = spinfo->err_avg_sc >> pinfo->smoothing_shift;
-
-       err_der = (pf - spinfo->last_pf) *
-                 (1 + pinfo->sharpen_factor * spinfo->sharp_cnt);
-       spinfo->last_pf = pf;
-       if (spinfo->sharp_cnt)
-                       spinfo->sharp_cnt--;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       rate_control_pid_event_pf_sample(&spinfo->events, pf, err_prop, err_int,
-                                        err_der);
-#endif
-
-       /* Compute the controller output. */
-       adj = (err_prop * pinfo->coeff_p + err_int * pinfo->coeff_i
-             + err_der * pinfo->coeff_d);
-       adj = RC_PID_DO_ARITH_RIGHT_SHIFT(adj, 2 * RC_PID_ARITH_SHIFT);
-
-       /* Change rate. */
-       if (adj)
-               rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo);
-}
-
-static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband,
-                                      struct ieee80211_sta *sta, void *priv_sta,
-                                      struct sk_buff *skb)
-{
-       struct rc_pid_info *pinfo = priv;
-       struct rc_pid_sta_info *spinfo = priv_sta;
-       unsigned long period;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
-       if (!spinfo)
-               return;
-
-       /* Ignore all frames that were sent with a different rate than the rate
-        * we currently advise mac80211 to use. */
-       if (info->status.rates[0].idx != spinfo->txrate_idx)
-               return;
-
-       spinfo->tx_num_xmit++;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       rate_control_pid_event_tx_status(&spinfo->events, info);
-#endif
-
-       /* We count frames that totally failed to be transmitted as two bad
-        * frames, those that made it out but had some retries as one good and
-        * one bad frame. */
-       if (!(info->flags & IEEE80211_TX_STAT_ACK)) {
-               spinfo->tx_num_failed += 2;
-               spinfo->tx_num_xmit++;
-       } else if (info->status.rates[0].count > 1) {
-               spinfo->tx_num_failed++;
-               spinfo->tx_num_xmit++;
-       }
-
-       /* Update PID controller state. */
-       period = msecs_to_jiffies(pinfo->sampling_period);
-       if (time_after(jiffies, spinfo->last_sample + period))
-               rate_control_pid_sample(pinfo, sband, sta, spinfo);
-}
-
-static void
-rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
-                         void *priv_sta,
-                         struct ieee80211_tx_rate_control *txrc)
-{
-       struct sk_buff *skb = txrc->skb;
-       struct ieee80211_supported_band *sband = txrc->sband;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct rc_pid_sta_info *spinfo = priv_sta;
-       int rateidx;
-
-       if (txrc->rts)
-               info->control.rates[0].count =
-                       txrc->hw->conf.long_frame_max_tx_count;
-       else
-               info->control.rates[0].count =
-                       txrc->hw->conf.short_frame_max_tx_count;
-
-       /* Send management frames and NO_ACK data using lowest rate. */
-       if (rate_control_send_low(sta, priv_sta, txrc))
-               return;
-
-       rateidx = spinfo->txrate_idx;
-
-       if (rateidx >= sband->n_bitrates)
-               rateidx = sband->n_bitrates - 1;
-
-       info->control.rates[0].idx = rateidx;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       rate_control_pid_event_tx_rate(&spinfo->events,
-               rateidx, sband->bitrates[rateidx].bitrate);
-#endif
-}
-
-static void
-rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
-                          struct cfg80211_chan_def *chandef,
-                          struct ieee80211_sta *sta, void *priv_sta)
-{
-       struct rc_pid_sta_info *spinfo = priv_sta;
-       struct rc_pid_info *pinfo = priv;
-       struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
-       int i, j, tmp;
-       bool s;
-
-       /* TODO: This routine should consider using RSSI from previous packets
-        * as we need to have IEEE 802.1X auth succeed immediately after assoc..
-        * Until that method is implemented, we will use the lowest supported
-        * rate as a workaround. */
-
-       /* Sort the rates. This is optimized for the most common case (i.e.
-        * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
-        * mapping too. */
-       for (i = 0; i < sband->n_bitrates; i++) {
-               rinfo[i].index = i;
-               rinfo[i].rev_index = i;
-               if (RC_PID_FAST_START)
-                       rinfo[i].diff = 0;
-               else
-                       rinfo[i].diff = i * pinfo->norm_offset;
-       }
-       for (i = 1; i < sband->n_bitrates; i++) {
-               s = false;
-               for (j = 0; j < sband->n_bitrates - i; j++)
-                       if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
-                                    sband->bitrates[rinfo[j + 1].index].bitrate)) {
-                               tmp = rinfo[j].index;
-                               rinfo[j].index = rinfo[j + 1].index;
-                               rinfo[j + 1].index = tmp;
-                               rinfo[rinfo[j].index].rev_index = j;
-                               rinfo[rinfo[j + 1].index].rev_index = j + 1;
-                               s = true;
-                       }
-               if (!s)
-                       break;
-       }
-
-       spinfo->txrate_idx = rate_lowest_index(sband, sta);
-}
-
-static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
-                                   struct dentry *debugfsdir)
-{
-       struct rc_pid_info *pinfo;
-       struct rc_pid_rateinfo *rinfo;
-       struct ieee80211_supported_band *sband;
-       int i, max_rates = 0;
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct rc_pid_debugfs_entries *de;
-#endif
-
-       pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
-       if (!pinfo)
-               return NULL;
-
-       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-               sband = hw->wiphy->bands[i];
-               if (sband && sband->n_bitrates > max_rates)
-                       max_rates = sband->n_bitrates;
-       }
-
-       rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
-       if (!rinfo) {
-               kfree(pinfo);
-               return NULL;
-       }
-
-       pinfo->target = RC_PID_TARGET_PF;
-       pinfo->sampling_period = RC_PID_INTERVAL;
-       pinfo->coeff_p = RC_PID_COEFF_P;
-       pinfo->coeff_i = RC_PID_COEFF_I;
-       pinfo->coeff_d = RC_PID_COEFF_D;
-       pinfo->smoothing_shift = RC_PID_SMOOTHING_SHIFT;
-       pinfo->sharpen_factor = RC_PID_SHARPENING_FACTOR;
-       pinfo->sharpen_duration = RC_PID_SHARPENING_DURATION;
-       pinfo->norm_offset = RC_PID_NORM_OFFSET;
-       pinfo->rinfo = rinfo;
-       pinfo->oldrate = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       de = &pinfo->dentries;
-       de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
-                                       debugfsdir, &pinfo->target);
-       de->sampling_period = debugfs_create_u32("sampling_period",
-                                                S_IRUSR | S_IWUSR, debugfsdir,
-                                                &pinfo->sampling_period);
-       de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR,
-                                        debugfsdir, (u32 *)&pinfo->coeff_p);
-       de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR,
-                                        debugfsdir, (u32 *)&pinfo->coeff_i);
-       de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR,
-                                        debugfsdir, (u32 *)&pinfo->coeff_d);
-       de->smoothing_shift = debugfs_create_u32("smoothing_shift",
-                                                S_IRUSR | S_IWUSR, debugfsdir,
-                                                &pinfo->smoothing_shift);
-       de->sharpen_factor = debugfs_create_u32("sharpen_factor",
-                                              S_IRUSR | S_IWUSR, debugfsdir,
-                                              &pinfo->sharpen_factor);
-       de->sharpen_duration = debugfs_create_u32("sharpen_duration",
-                                                 S_IRUSR | S_IWUSR, debugfsdir,
-                                                 &pinfo->sharpen_duration);
-       de->norm_offset = debugfs_create_u32("norm_offset",
-                                            S_IRUSR | S_IWUSR, debugfsdir,
-                                            &pinfo->norm_offset);
-#endif
-
-       return pinfo;
-}
-
-static void rate_control_pid_free(void *priv)
-{
-       struct rc_pid_info *pinfo = priv;
-#ifdef CONFIG_MAC80211_DEBUGFS
-       struct rc_pid_debugfs_entries *de = &pinfo->dentries;
-
-       debugfs_remove(de->norm_offset);
-       debugfs_remove(de->sharpen_duration);
-       debugfs_remove(de->sharpen_factor);
-       debugfs_remove(de->smoothing_shift);
-       debugfs_remove(de->coeff_d);
-       debugfs_remove(de->coeff_i);
-       debugfs_remove(de->coeff_p);
-       debugfs_remove(de->sampling_period);
-       debugfs_remove(de->target);
-#endif
-
-       kfree(pinfo->rinfo);
-       kfree(pinfo);
-}
-
-static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta,
-                                       gfp_t gfp)
-{
-       struct rc_pid_sta_info *spinfo;
-
-       spinfo = kzalloc(sizeof(*spinfo), gfp);
-       if (spinfo == NULL)
-               return NULL;
-
-       spinfo->last_sample = jiffies;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-       spin_lock_init(&spinfo->events.lock);
-       init_waitqueue_head(&spinfo->events.waitqueue);
-#endif
-
-       return spinfo;
-}
-
-static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
-                                     void *priv_sta)
-{
-       kfree(priv_sta);
-}
-
-static const struct rate_control_ops mac80211_rcpid = {
-       .name = "pid",
-       .tx_status = rate_control_pid_tx_status,
-       .get_rate = rate_control_pid_get_rate,
-       .rate_init = rate_control_pid_rate_init,
-       .alloc = rate_control_pid_alloc,
-       .free = rate_control_pid_free,
-       .alloc_sta = rate_control_pid_alloc_sta,
-       .free_sta = rate_control_pid_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
-       .add_sta_debugfs = rate_control_pid_add_sta_debugfs,
-       .remove_sta_debugfs = rate_control_pid_remove_sta_debugfs,
-#endif
-};
-
-int __init rc80211_pid_init(void)
-{
-       return ieee80211_rate_control_register(&mac80211_rcpid);
-}
-
-void rc80211_pid_exit(void)
-{
-       ieee80211_rate_control_unregister(&mac80211_rcpid);
-}
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
deleted file mode 100644 (file)
index 6ff1346..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-#include <linux/poll.h>
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <net/mac80211.h>
-#include "rate.h"
-
-#include "rc80211_pid.h"
-
-static void rate_control_pid_event(struct rc_pid_event_buffer *buf,
-                                  enum rc_pid_event_type type,
-                                  union rc_pid_event_data *data)
-{
-       struct rc_pid_event *ev;
-       unsigned long status;
-
-       spin_lock_irqsave(&buf->lock, status);
-       ev = &(buf->ring[buf->next_entry]);
-       buf->next_entry = (buf->next_entry + 1) % RC_PID_EVENT_RING_SIZE;
-
-       ev->timestamp = jiffies;
-       ev->id = buf->ev_count++;
-       ev->type = type;
-       ev->data = *data;
-
-       spin_unlock_irqrestore(&buf->lock, status);
-
-       wake_up_all(&buf->waitqueue);
-}
-
-void rate_control_pid_event_tx_status(struct rc_pid_event_buffer *buf,
-                                     struct ieee80211_tx_info *stat)
-{
-       union rc_pid_event_data evd;
-
-       evd.flags = stat->flags;
-       memcpy(&evd.tx_status, stat, sizeof(struct ieee80211_tx_info));
-       rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_STATUS, &evd);
-}
-
-void rate_control_pid_event_rate_change(struct rc_pid_event_buffer *buf,
-                                              int index, int rate)
-{
-       union rc_pid_event_data evd;
-
-       evd.index = index;
-       evd.rate = rate;
-       rate_control_pid_event(buf, RC_PID_EVENT_TYPE_RATE_CHANGE, &evd);
-}
-
-void rate_control_pid_event_tx_rate(struct rc_pid_event_buffer *buf,
-                                          int index, int rate)
-{
-       union rc_pid_event_data evd;
-
-       evd.index = index;
-       evd.rate = rate;
-       rate_control_pid_event(buf, RC_PID_EVENT_TYPE_TX_RATE, &evd);
-}
-
-void rate_control_pid_event_pf_sample(struct rc_pid_event_buffer *buf,
-                                            s32 pf_sample, s32 prop_err,
-                                            s32 int_err, s32 der_err)
-{
-       union rc_pid_event_data evd;
-
-       evd.pf_sample = pf_sample;
-       evd.prop_err = prop_err;
-       evd.int_err = int_err;
-       evd.der_err = der_err;
-       rate_control_pid_event(buf, RC_PID_EVENT_TYPE_PF_SAMPLE, &evd);
-}
-
-static int rate_control_pid_events_open(struct inode *inode, struct file *file)
-{
-       struct rc_pid_sta_info *sinfo = inode->i_private;
-       struct rc_pid_event_buffer *events = &sinfo->events;
-       struct rc_pid_events_file_info *file_info;
-       unsigned long status;
-
-       /* Allocate a state struct */
-       file_info = kmalloc(sizeof(*file_info), GFP_KERNEL);
-       if (file_info == NULL)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&events->lock, status);
-
-       file_info->next_entry = events->next_entry;
-       file_info->events = events;
-
-       spin_unlock_irqrestore(&events->lock, status);
-
-       file->private_data = file_info;
-
-       return 0;
-}
-
-static int rate_control_pid_events_release(struct inode *inode,
-                                          struct file *file)
-{
-       struct rc_pid_events_file_info *file_info = file->private_data;
-
-       kfree(file_info);
-
-       return 0;
-}
-
-static unsigned int rate_control_pid_events_poll(struct file *file,
-                                                poll_table *wait)
-{
-       struct rc_pid_events_file_info *file_info = file->private_data;
-
-       poll_wait(file, &file_info->events->waitqueue, wait);
-
-       return POLLIN | POLLRDNORM;
-}
-
-#define RC_PID_PRINT_BUF_SIZE 64
-
-static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
-                                           size_t length, loff_t *offset)
-{
-       struct rc_pid_events_file_info *file_info = file->private_data;
-       struct rc_pid_event_buffer *events = file_info->events;
-       struct rc_pid_event *ev;
-       char pb[RC_PID_PRINT_BUF_SIZE];
-       int ret;
-       int p;
-       unsigned long status;
-
-       /* Check if there is something to read. */
-       if (events->next_entry == file_info->next_entry) {
-               if (file->f_flags & O_NONBLOCK)
-                       return -EAGAIN;
-
-               /* Wait */
-               ret = wait_event_interruptible(events->waitqueue,
-                               events->next_entry != file_info->next_entry);
-
-               if (ret)
-                       return ret;
-       }
-
-       /* Write out one event per call. I don't care whether it's a little
-        * inefficient, this is debugging code anyway. */
-       spin_lock_irqsave(&events->lock, status);
-
-       /* Get an event */
-       ev = &(events->ring[file_info->next_entry]);
-       file_info->next_entry = (file_info->next_entry + 1) %
-                               RC_PID_EVENT_RING_SIZE;
-
-       /* Print information about the event. Note that userspace needs to
-        * provide large enough buffers. */
-       length = length < RC_PID_PRINT_BUF_SIZE ?
-                length : RC_PID_PRINT_BUF_SIZE;
-       p = scnprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
-       switch (ev->type) {
-       case RC_PID_EVENT_TYPE_TX_STATUS:
-               p += scnprintf(pb + p, length - p, "tx_status %u %u",
-                              !(ev->data.flags & IEEE80211_TX_STAT_ACK),
-                              ev->data.tx_status.status.rates[0].idx);
-               break;
-       case RC_PID_EVENT_TYPE_RATE_CHANGE:
-               p += scnprintf(pb + p, length - p, "rate_change %d %d",
-                              ev->data.index, ev->data.rate);
-               break;
-       case RC_PID_EVENT_TYPE_TX_RATE:
-               p += scnprintf(pb + p, length - p, "tx_rate %d %d",
-                              ev->data.index, ev->data.rate);
-               break;
-       case RC_PID_EVENT_TYPE_PF_SAMPLE:
-               p += scnprintf(pb + p, length - p,
-                              "pf_sample %d %d %d %d",
-                              ev->data.pf_sample, ev->data.prop_err,
-                              ev->data.int_err, ev->data.der_err);
-               break;
-       }
-       p += scnprintf(pb + p, length - p, "\n");
-
-       spin_unlock_irqrestore(&events->lock, status);
-
-       if (copy_to_user(buf, pb, p))
-               return -EFAULT;
-
-       return p;
-}
-
-#undef RC_PID_PRINT_BUF_SIZE
-
-static const struct file_operations rc_pid_fop_events = {
-       .owner = THIS_MODULE,
-       .read = rate_control_pid_events_read,
-       .poll = rate_control_pid_events_poll,
-       .open = rate_control_pid_events_open,
-       .release = rate_control_pid_events_release,
-       .llseek = noop_llseek,
-};
-
-void rate_control_pid_add_sta_debugfs(void *priv, void *priv_sta,
-                                            struct dentry *dir)
-{
-       struct rc_pid_sta_info *spinfo = priv_sta;
-
-       spinfo->events_entry = debugfs_create_file("rc_pid_events", S_IRUGO,
-                                                  dir, spinfo,
-                                                  &rc_pid_fop_events);
-}
-
-void rate_control_pid_remove_sta_debugfs(void *priv, void *priv_sta)
-{
-       struct rc_pid_sta_info *spinfo = priv_sta;
-
-       debugfs_remove(spinfo->events_entry);
-}
index 394e201cde6d3b6d4375f973937df55395547fea..5f572bed176100d634a64ba5fcc8af181ce543be 100644 (file)
@@ -1107,6 +1107,8 @@ static void sta_ps_end(struct sta_info *sta)
                return;
        }
 
+       set_sta_flag(sta, WLAN_STA_PS_DELIVER);
+       clear_sta_flag(sta, WLAN_STA_PS_STA);
        ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
index f40661eb75b578dd2e409757331c085d36461723..a0a938145dcc87397cb4ecfad5f59b1c5fb28dbc 100644 (file)
@@ -235,38 +235,51 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
 {
        struct cfg80211_scan_request *req = local->scan_req;
        struct cfg80211_chan_def chandef;
-       enum ieee80211_band band;
+       u8 bands_used = 0;
        int i, ielen, n_chans;
 
        if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
                return false;
 
-       do {
-               if (local->hw_scan_band == IEEE80211_NUM_BANDS)
-                       return false;
-
-               band = local->hw_scan_band;
-               n_chans = 0;
+       if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
                for (i = 0; i < req->n_channels; i++) {
-                       if (req->channels[i]->band == band) {
-                               local->hw_scan_req->channels[n_chans] =
+                       local->hw_scan_req->req.channels[i] = req->channels[i];
+                       bands_used |= BIT(req->channels[i]->band);
+               }
+
+               n_chans = req->n_channels;
+       } else {
+               do {
+                       if (local->hw_scan_band == IEEE80211_NUM_BANDS)
+                               return false;
+
+                       n_chans = 0;
+
+                       for (i = 0; i < req->n_channels; i++) {
+                               if (req->channels[i]->band !=
+                                   local->hw_scan_band)
+                                       continue;
+                               local->hw_scan_req->req.channels[n_chans] =
                                                        req->channels[i];
                                n_chans++;
+                               bands_used |= BIT(req->channels[i]->band);
                        }
-               }
 
-               local->hw_scan_band++;
-       } while (!n_chans);
+                       local->hw_scan_band++;
+               } while (!n_chans);
+       }
 
-       local->hw_scan_req->n_channels = n_chans;
+       local->hw_scan_req->req.n_channels = n_chans;
        ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
 
-       ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
+       ielen = ieee80211_build_preq_ies(local,
+                                        (u8 *)local->hw_scan_req->req.ie,
                                         local->hw_scan_ies_bufsize,
-                                        req->ie, req->ie_len, band,
-                                        req->rates[band], &chandef);
-       local->hw_scan_req->ie_len = ielen;
-       local->hw_scan_req->no_cck = req->no_cck;
+                                        &local->hw_scan_req->ies,
+                                        req->ie, req->ie_len,
+                                        bands_used, req->rates, &chandef);
+       local->hw_scan_req->req.ie_len = ielen;
+       local->hw_scan_req->req.no_cck = req->no_cck;
 
        return true;
 }
@@ -291,7 +304,9 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        if (WARN_ON(!local->scan_req))
                return;
 
-       if (hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
+       if (hw_scan && !aborted &&
+           !(local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) &&
+           ieee80211_prep_hw_scan(local)) {
                int rc;
 
                rc = drv_hw_scan(local,
@@ -473,6 +488,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                u8 *ies;
 
                local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
+
+               if (local->hw.flags & IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS) {
+                       int i, n_bands = 0;
+                       u8 bands_counted = 0;
+
+                       for (i = 0; i < req->n_channels; i++) {
+                               if (bands_counted & BIT(req->channels[i]->band))
+                                       continue;
+                               bands_counted |= BIT(req->channels[i]->band);
+                               n_bands++;
+                       }
+
+                       local->hw_scan_ies_bufsize *= n_bands;
+               }
+
                local->hw_scan_req = kmalloc(
                                sizeof(*local->hw_scan_req) +
                                req->n_channels * sizeof(req->channels[0]) +
@@ -480,13 +510,13 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                if (!local->hw_scan_req)
                        return -ENOMEM;
 
-               local->hw_scan_req->ssids = req->ssids;
-               local->hw_scan_req->n_ssids = req->n_ssids;
+               local->hw_scan_req->req.ssids = req->ssids;
+               local->hw_scan_req->req.n_ssids = req->n_ssids;
                ies = (u8 *)local->hw_scan_req +
                        sizeof(*local->hw_scan_req) +
                        req->n_channels * sizeof(req->channels[0]);
-               local->hw_scan_req->ie = ies;
-               local->hw_scan_req->flags = req->flags;
+               local->hw_scan_req->req.ie = ies;
+               local->hw_scan_req->req.flags = req->flags;
 
                local->hw_scan_band = 0;
 
@@ -973,9 +1003,13 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                                        struct cfg80211_sched_scan_request *req)
 {
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_sched_scan_ies sched_scan_ies = {};
+       struct ieee80211_scan_ies sched_scan_ies = {};
        struct cfg80211_chan_def chandef;
-       int ret, i, iebufsz;
+       int ret, i, iebufsz, num_bands = 0;
+       u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+       u8 bands_used = 0;
+       u8 *ie;
+       size_t len;
 
        iebufsz = local->scan_ies_len + req->ie_len;
 
@@ -985,33 +1019,35 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
                return -ENOTSUPP;
 
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
-               if (!local->hw.wiphy->bands[i])
-                       continue;
-
-               sched_scan_ies.ie[i] = kzalloc(iebufsz, GFP_KERNEL);
-               if (!sched_scan_ies.ie[i]) {
-                       ret = -ENOMEM;
-                       goto out_free;
+               if (local->hw.wiphy->bands[i]) {
+                       bands_used |= BIT(i);
+                       rate_masks[i] = (u32) -1;
+                       num_bands++;
                }
+       }
 
-               ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
-
-               sched_scan_ies.len[i] =
-                       ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
-                                                iebufsz, req->ie, req->ie_len,
-                                                i, (u32) -1, &chandef);
+       ie = kzalloc(num_bands * iebufsz, GFP_KERNEL);
+       if (!ie) {
+               ret = -ENOMEM;
+               goto out;
        }
 
+       ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
+       len = ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
+                                      &sched_scan_ies, req->ie,
+                                      req->ie_len, bands_used,
+                                      rate_masks, &chandef);
+
        ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
        if (ret == 0) {
                rcu_assign_pointer(local->sched_scan_sdata, sdata);
                local->sched_scan_req = req;
        }
 
-out_free:
-       while (i > 0)
-               kfree(sched_scan_ies.ie[--i]);
+       kfree(ie);
 
+out:
        if (ret) {
                /* Clean in case of failure after HW restart or upon resume. */
                RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
index a9b46d8ea22ff696623cec4cad55949fc5afd2a1..f41177f58b30c483f95596bc3e9d5acf90b00512 100644 (file)
@@ -100,7 +100,8 @@ static void __cleanup_single_sta(struct sta_info *sta)
        struct ps_data *ps;
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
-           test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+           test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
+           test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        ps = &sdata->bss->ps;
@@ -111,6 +112,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
 
                clear_sta_flag(sta, WLAN_STA_PS_STA);
                clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+               clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
 
                atomic_dec(&ps->num_sta_ps);
                sta_info_recalc_tim(sta);
@@ -125,7 +127,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_sta_cleanup(sta);
 
-       cancel_work_sync(&sta->drv_unblock_wk);
+       cancel_work_sync(&sta->drv_deliver_wk);
 
        /*
         * Destroy aggregation state here. It would be nice to wait for the
@@ -253,33 +255,23 @@ static void sta_info_hash_add(struct ieee80211_local *local,
        rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
 }
 
-static void sta_unblock(struct work_struct *wk)
+static void sta_deliver_ps_frames(struct work_struct *wk)
 {
        struct sta_info *sta;
 
-       sta = container_of(wk, struct sta_info, drv_unblock_wk);
+       sta = container_of(wk, struct sta_info, drv_deliver_wk);
 
        if (sta->dead)
                return;
 
-       if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
-               local_bh_disable();
+       local_bh_disable();
+       if (!test_sta_flag(sta, WLAN_STA_PS_STA))
                ieee80211_sta_ps_deliver_wakeup(sta);
-               local_bh_enable();
-       } else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
-               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
-
-               local_bh_disable();
+       else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
                ieee80211_sta_ps_deliver_poll_response(sta);
-               local_bh_enable();
-       } else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
-               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
-
-               local_bh_disable();
+       else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
                ieee80211_sta_ps_deliver_uapsd(sta);
-               local_bh_enable();
-       } else
-               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+       local_bh_enable();
 }
 
 static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -341,7 +333,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_init(&sta->lock);
        spin_lock_init(&sta->ps_lock);
-       INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
+       INIT_WORK(&sta->drv_deliver_wk, sta_deliver_ps_frames);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
@@ -358,7 +350,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        sta->sta_state = IEEE80211_STA_NONE;
 
-       do_posix_clock_monotonic_gettime(&uptime);
+       ktime_get_ts(&uptime);
        sta->last_connected = uptime.tv_sec;
        ewma_init(&sta->avg_signal, 1024, 8);
        for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
@@ -1141,8 +1133,15 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        }
 
        ieee80211_add_pending_skbs(local, &pending);
-       clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
-       clear_sta_flag(sta, WLAN_STA_PS_STA);
+
+       /* now we're no longer in the deliver code */
+       clear_sta_flag(sta, WLAN_STA_PS_DELIVER);
+
+       /* The station might have polled and then woken up before we responded,
+        * so clear these flags now to avoid them sticking around.
+        */
+       clear_sta_flag(sta, WLAN_STA_PSPOLL);
+       clear_sta_flag(sta, WLAN_STA_UAPSD);
        spin_unlock(&sta->ps_lock);
 
        atomic_dec(&ps->num_sta_ps);
@@ -1543,10 +1542,26 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
 
        trace_api_sta_block_awake(sta->local, pubsta, block);
 
-       if (block)
+       if (block) {
                set_sta_flag(sta, WLAN_STA_PS_DRIVER);
-       else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
-               ieee80211_queue_work(hw, &sta->drv_unblock_wk);
+               return;
+       }
+
+       if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
+               return;
+
+       if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               set_sta_flag(sta, WLAN_STA_PS_DELIVER);
+               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+               ieee80211_queue_work(hw, &sta->drv_deliver_wk);
+       } else if (test_sta_flag(sta, WLAN_STA_PSPOLL) ||
+                  test_sta_flag(sta, WLAN_STA_UAPSD)) {
+               /* must be asleep in this case */
+               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+               ieee80211_queue_work(hw, &sta->drv_deliver_wk);
+       } else {
+               clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+       }
 }
 EXPORT_SYMBOL(ieee80211_sta_block_awake);
 
@@ -1704,3 +1719,137 @@ u8 sta_info_tx_streams(struct sta_info *sta)
        return ((ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
                        >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
 }
+
+void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
+{
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_local *local = sdata->local;
+       struct rate_control_ref *ref = local->rate_ctrl;
+       struct timespec uptime;
+       u64 packets = 0;
+       u32 thr = 0;
+       int i, ac;
+
+       sinfo->generation = sdata->local->sta_generation;
+
+       sinfo->filled = STATION_INFO_INACTIVE_TIME |
+                       STATION_INFO_RX_BYTES64 |
+                       STATION_INFO_TX_BYTES64 |
+                       STATION_INFO_RX_PACKETS |
+                       STATION_INFO_TX_PACKETS |
+                       STATION_INFO_TX_RETRIES |
+                       STATION_INFO_TX_FAILED |
+                       STATION_INFO_TX_BITRATE |
+                       STATION_INFO_RX_BITRATE |
+                       STATION_INFO_RX_DROP_MISC |
+                       STATION_INFO_BSS_PARAM |
+                       STATION_INFO_CONNECTED_TIME |
+                       STATION_INFO_STA_FLAGS |
+                       STATION_INFO_BEACON_LOSS_COUNT;
+
+       ktime_get_ts(&uptime);
+       sinfo->connected_time = uptime.tv_sec - sta->last_connected;
+
+       sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
+       sinfo->tx_bytes = 0;
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               sinfo->tx_bytes += sta->tx_bytes[ac];
+               packets += sta->tx_packets[ac];
+       }
+       sinfo->tx_packets = packets;
+       sinfo->rx_bytes = sta->rx_bytes;
+       sinfo->rx_packets = sta->rx_packets;
+       sinfo->tx_retries = sta->tx_retry_count;
+       sinfo->tx_failed = sta->tx_retry_failed;
+       sinfo->rx_dropped_misc = sta->rx_dropped;
+       sinfo->beacon_loss_count = sta->beacon_loss_count;
+
+       if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
+           (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
+               sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
+               if (!local->ops->get_rssi ||
+                   drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
+                       sinfo->signal = (s8)sta->last_signal;
+               sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+       }
+       if (sta->chains) {
+               sinfo->filled |= STATION_INFO_CHAIN_SIGNAL |
+                                STATION_INFO_CHAIN_SIGNAL_AVG;
+
+               sinfo->chains = sta->chains;
+               for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+                       sinfo->chain_signal[i] = sta->chain_signal_last[i];
+                       sinfo->chain_signal_avg[i] =
+                               (s8) -ewma_read(&sta->chain_signal_avg[i]);
+               }
+       }
+
+       sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
+       sta_set_rate_info_rx(sta, &sinfo->rxrate);
+
+       if (ieee80211_vif_is_mesh(&sdata->vif)) {
+#ifdef CONFIG_MAC80211_MESH
+               sinfo->filled |= STATION_INFO_LLID |
+                                STATION_INFO_PLID |
+                                STATION_INFO_PLINK_STATE |
+                                STATION_INFO_LOCAL_PM |
+                                STATION_INFO_PEER_PM |
+                                STATION_INFO_NONPEER_PM;
+
+               sinfo->llid = sta->llid;
+               sinfo->plid = sta->plid;
+               sinfo->plink_state = sta->plink_state;
+               if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
+                       sinfo->filled |= STATION_INFO_T_OFFSET;
+                       sinfo->t_offset = sta->t_offset;
+               }
+               sinfo->local_pm = sta->local_pm;
+               sinfo->peer_pm = sta->peer_pm;
+               sinfo->nonpeer_pm = sta->nonpeer_pm;
+#endif
+       }
+
+       sinfo->bss_param.flags = 0;
+       if (sdata->vif.bss_conf.use_cts_prot)
+               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+       if (sdata->vif.bss_conf.use_short_preamble)
+               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+       if (sdata->vif.bss_conf.use_short_slot)
+               sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+       sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+       sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
+
+       sinfo->sta_flags.set = 0;
+       sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+                               BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+                               BIT(NL80211_STA_FLAG_WME) |
+                               BIT(NL80211_STA_FLAG_MFP) |
+                               BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+                               BIT(NL80211_STA_FLAG_ASSOCIATED) |
+                               BIT(NL80211_STA_FLAG_TDLS_PEER);
+       if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+       if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+       if (test_sta_flag(sta, WLAN_STA_WME))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
+       if (test_sta_flag(sta, WLAN_STA_MFP))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
+       if (test_sta_flag(sta, WLAN_STA_AUTH))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
+       if (test_sta_flag(sta, WLAN_STA_ASSOC))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
+       if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+               sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
+
+       /* check if the driver has a SW RC implementation */
+       if (ref && ref->ops->get_expected_throughput)
+               thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv);
+       else
+               thr = drv_get_expected_throughput(local, &sta->sta);
+
+       if (thr != 0) {
+               sinfo->filled |= STATION_INFO_EXPECTED_THROUGHPUT;
+               sinfo->expected_throughput = thr;
+       }
+}
index 4acc5fc402fa30b11e8f26da93be91eeabcc303a..2a04361b2162308358dc5f06796b94fd34aa5c29 100644 (file)
@@ -58,6 +58,8 @@
  * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
  * @WLAN_STA_MPSP_OWNER: local STA is owner of a mesh Peer Service Period.
  * @WLAN_STA_MPSP_RECIPIENT: local STA is recipient of a MPSP.
+ * @WLAN_STA_PS_DELIVER: station woke up, but we're still blocking TX
+ *     until pending frames are delivered
  */
 enum ieee80211_sta_info_flags {
        WLAN_STA_AUTH,
@@ -82,6 +84,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_TOFFSET_KNOWN,
        WLAN_STA_MPSP_OWNER,
        WLAN_STA_MPSP_RECIPIENT,
+       WLAN_STA_PS_DELIVER,
 };
 
 #define ADDBA_RESP_INTERVAL HZ
@@ -265,7 +268,7 @@ struct ieee80211_tx_latency_stat {
  * @last_rx_rate_vht_nss: rx status nss of last data packet
  * @lock: used for locking all fields that require locking, see comments
  *     in the header file.
- * @drv_unblock_wk: used for driver PS unblocking
+ * @drv_deliver_wk: used for delivering frames after driver PS unblocking
  * @listen_interval: listen interval of this station, when we're acting as AP
  * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
  * @ps_lock: used for powersave (when mac80211 is the AP) related locking
@@ -278,7 +281,6 @@ struct ieee80211_tx_latency_stat {
  * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
  * @rx_packets: Number of MSDUs received from this STA
  * @rx_bytes: Number of bytes received from this STA
- * @wep_weak_iv_count: number of weak WEP IVs received from this station
  * @last_rx: time (in jiffies) when last frame was received from this STA
  * @last_connected: time (in seconds) when a station got connected
  * @num_duplicates: number of duplicate frames received from this STA
@@ -303,7 +305,6 @@ struct ieee80211_tx_latency_stat {
  * @plid: Peer link ID
  * @reason: Cancel reason on PLINK_HOLDING state
  * @plink_retries: Retries in establishment
- * @ignore_plink_timer: ignore the peer-link timer (used internally)
  * @plink_state: peer link state
  * @plink_timeout: timeout of peer link
  * @plink_timer: peer link watch timer
@@ -345,7 +346,7 @@ struct sta_info {
        void *rate_ctrl_priv;
        spinlock_t lock;
 
-       struct work_struct drv_unblock_wk;
+       struct work_struct drv_deliver_wk;
 
        u16 listen_interval;
 
@@ -367,7 +368,6 @@ struct sta_info {
        /* Updated from RX path only, no locking requirements */
        unsigned long rx_packets;
        u64 rx_bytes;
-       unsigned long wep_weak_iv_count;
        unsigned long last_rx;
        long last_connected;
        unsigned long num_duplicates;
@@ -418,7 +418,6 @@ struct sta_info {
        u16 plid;
        u16 reason;
        u8 plink_retries;
-       bool ignore_plink_timer;
        enum nl80211_plink_state plink_state;
        u32 plink_timeout;
        struct timer_list plink_timer;
@@ -628,6 +627,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
                          struct rate_info *rinfo);
 void sta_set_rate_info_rx(struct sta_info *sta,
                          struct rate_info *rinfo);
+void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo);
+
 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
                          unsigned long exp_time);
 u8 sta_info_tx_streams(struct sta_info *sta);
index ba29ebc8614121ea90ff5ff1cec44f137ac85a69..aa06dcad336e6d9bcdfff2009d7775b26bee8a6e 100644 (file)
@@ -473,8 +473,6 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
                                            struct sta_info *sta,
                                            struct ieee80211_hdr *hdr)
 {
-       ktime_t skb_dprt;
-       struct timespec dprt_time;
        u32 msrmnt;
        u16 tid;
        u8 *qc;
@@ -506,9 +504,8 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
 
        tx_lat = &sta->tx_lat[tid];
 
-       ktime_get_ts(&dprt_time); /* time stamp completion time */
-       skb_dprt = ktime_set(dprt_time.tv_sec, dprt_time.tv_nsec);
-       msrmnt = ktime_to_ms(ktime_sub(skb_dprt, skb_arv));
+       /* Calculate the latency */
+       msrmnt = ktime_to_ms(ktime_sub(ktime_get(), skb_arv));
 
        if (tx_lat->max < msrmnt) /* update stats */
                tx_lat->max = msrmnt;
index 652813b2d3df6afda3d136f1c8db1fd35f2d3b1d..f7185338a0fad33508f17837041cbdb2c2d90d68 100644 (file)
@@ -8,7 +8,30 @@
  */
 
 #include <linux/ieee80211.h>
+#include <net/cfg80211.h>
 #include "ieee80211_i.h"
+#include "driver-ops.h"
+
+/* give usermode some time for retries in setting up the TDLS session */
+#define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
+
+void ieee80211_tdls_peer_del_work(struct work_struct *wk)
+{
+       struct ieee80211_sub_if_data *sdata;
+       struct ieee80211_local *local;
+
+       sdata = container_of(wk, struct ieee80211_sub_if_data,
+                            tdls_peer_del_work.work);
+       local = sdata->local;
+
+       mutex_lock(&local->mtx);
+       if (!is_zero_ether_addr(sdata->tdls_peer)) {
+               tdls_dbg(sdata, "TDLS del peer %pM\n", sdata->tdls_peer);
+               sta_info_destroy_addr(sdata, sdata->tdls_peer);
+               eth_zero_addr(sdata->tdls_peer);
+       }
+       mutex_unlock(&local->mtx);
+}
 
 static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
 {
@@ -168,28 +191,20 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
        return 0;
 }
 
-int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
-                       const u8 *peer, u8 action_code, u8 dialog_token,
-                       u16 status_code, u32 peer_capability,
-                       const u8 *extra_ies, size_t extra_ies_len)
+static int
+ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
+                               const u8 *peer, u8 action_code,
+                               u8 dialog_token, u16 status_code,
+                               u32 peer_capability, bool initiator,
+                               const u8 *extra_ies, size_t extra_ies_len)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb = NULL;
        bool send_direct;
+       const u8 *init_addr, *rsp_addr;
        int ret;
 
-       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
-               return -ENOTSUPP;
-
-       /* make sure we are in managed mode, and associated */
-       if (sdata->vif.type != NL80211_IFTYPE_STATION ||
-           !sdata->u.mgd.associated)
-               return -EINVAL;
-
-       tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
-                action_code, peer);
-
        skb = dev_alloc_skb(local->hw.extra_tx_headroom +
                            max(sizeof(struct ieee80211_mgmt),
                                sizeof(struct ieee80211_tdls_data)) +
@@ -230,27 +245,42 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
        if (extra_ies_len)
                memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
 
-       /* the TDLS link IE is always added last */
+       /* sanity check for initiator */
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_CONFIRM:
-       case WLAN_TDLS_TEARDOWN:
        case WLAN_TDLS_DISCOVERY_REQUEST:
-               /* we are the initiator */
-               ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
-                                          sdata->u.mgd.bssid);
+               if (!initiator) {
+                       ret = -EINVAL;
+                       goto fail;
+               }
                break;
        case WLAN_TDLS_SETUP_RESPONSE:
        case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-               /* we are the responder */
-               ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
-                                          sdata->u.mgd.bssid);
+               if (initiator) {
+                       ret = -EINVAL;
+                       goto fail;
+               }
+               break;
+       case WLAN_TDLS_TEARDOWN:
+               /* any value is ok */
                break;
        default:
                ret = -ENOTSUPP;
                goto fail;
        }
 
+       if (initiator) {
+               init_addr = sdata->vif.addr;
+               rsp_addr = peer;
+       } else {
+               init_addr = peer;
+               rsp_addr = sdata->vif.addr;
+       }
+
+       ieee80211_tdls_add_link_ie(skb, init_addr, rsp_addr,
+                                  sdata->u.mgd.bssid);
+
        if (send_direct) {
                ieee80211_tx_skb(sdata, skb);
                return 0;
@@ -284,11 +314,171 @@ fail:
        return ret;
 }
 
+static int
+ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev,
+                         const u8 *peer, u8 action_code, u8 dialog_token,
+                         u16 status_code, u32 peer_capability, bool initiator,
+                         const u8 *extra_ies, size_t extra_ies_len)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       int ret;
+
+       mutex_lock(&local->mtx);
+
+       /* we don't support concurrent TDLS peer setups */
+       if (!is_zero_ether_addr(sdata->tdls_peer) &&
+           !ether_addr_equal(sdata->tdls_peer, peer)) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       /*
+        * make sure we have a STA representing the peer so we drop or buffer
+        * non-TDLS-setup frames to the peer. We can't send other packets
+        * during setup through the AP path
+        */
+       rcu_read_lock();
+       if (!sta_info_get(sdata, peer)) {
+               rcu_read_unlock();
+               ret = -ENOLINK;
+               goto exit;
+       }
+       rcu_read_unlock();
+
+       ieee80211_flush_queues(local, sdata);
+
+       ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
+                                             dialog_token, status_code,
+                                             peer_capability, initiator,
+                                             extra_ies, extra_ies_len);
+       if (ret < 0)
+               goto exit;
+
+       memcpy(sdata->tdls_peer, peer, ETH_ALEN);
+       ieee80211_queue_delayed_work(&sdata->local->hw,
+                                    &sdata->tdls_peer_del_work,
+                                    TDLS_PEER_SETUP_TIMEOUT);
+
+exit:
+       mutex_unlock(&local->mtx);
+       return ret;
+}
+
+static int
+ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev,
+                            const u8 *peer, u8 action_code, u8 dialog_token,
+                            u16 status_code, u32 peer_capability,
+                            bool initiator, const u8 *extra_ies,
+                            size_t extra_ies_len)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
+       int ret;
+
+       /*
+        * No packets can be transmitted to the peer via the AP during setup -
+        * the STA is set as a TDLS peer, but is not authorized.
+        * During teardown, we prevent direct transmissions by stopping the
+        * queues and flushing all direct packets.
+        */
+       ieee80211_stop_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
+       ieee80211_flush_queues(local, sdata);
+
+       ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
+                                             dialog_token, status_code,
+                                             peer_capability, initiator,
+                                             extra_ies, extra_ies_len);
+       if (ret < 0)
+               sdata_err(sdata, "Failed sending TDLS teardown packet %d\n",
+                         ret);
+
+       /*
+        * Remove the STA AUTH flag to force further traffic through the AP. If
+        * the STA was unreachable, it was already removed.
+        */
+       rcu_read_lock();
+       sta = sta_info_get(sdata, peer);
+       if (sta)
+               clear_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+       rcu_read_unlock();
+
+       ieee80211_wake_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN);
+
+       return 0;
+}
+
+int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                       const u8 *peer, u8 action_code, u8 dialog_token,
+                       u16 status_code, u32 peer_capability,
+                       bool initiator, const u8 *extra_ies,
+                       size_t extra_ies_len)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       int ret;
+
+       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+               return -ENOTSUPP;
+
+       /* make sure we are in managed mode, and associated */
+       if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+           !sdata->u.mgd.associated)
+               return -EINVAL;
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_RESPONSE:
+               ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code,
+                                               dialog_token, status_code,
+                                               peer_capability, initiator,
+                                               extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_TEARDOWN:
+               ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer,
+                                                  action_code, dialog_token,
+                                                  status_code,
+                                                  peer_capability, initiator,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               /*
+                * Protect the discovery so we can hear the TDLS discovery
+                * response frame. It is transmitted directly and not buffered
+                * by the AP.
+                */
+               drv_mgd_protect_tdls_discover(sdata->local, sdata);
+               /* fall-through */
+       case WLAN_TDLS_SETUP_CONFIRM:
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               /* no special handling */
+               ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
+                                                     action_code,
+                                                     dialog_token,
+                                                     status_code,
+                                                     peer_capability,
+                                                     initiator, extra_ies,
+                                                     extra_ies_len);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       tdls_dbg(sdata, "TDLS mgmt action %d peer %pM status %d\n",
+                action_code, peer, ret);
+       return ret;
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper)
 {
        struct sta_info *sta;
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       int ret;
 
        if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
                return -ENOTSUPP;
@@ -296,6 +486,18 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                return -EINVAL;
 
+       switch (oper) {
+       case NL80211_TDLS_ENABLE_LINK:
+       case NL80211_TDLS_DISABLE_LINK:
+               break;
+       case NL80211_TDLS_TEARDOWN:
+       case NL80211_TDLS_SETUP:
+       case NL80211_TDLS_DISCOVERY_REQ:
+               /* We don't support in-driver setup/teardown/discovery */
+               return -ENOTSUPP;
+       }
+
+       mutex_lock(&local->mtx);
        tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
 
        switch (oper) {
@@ -304,22 +506,49 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                sta = sta_info_get(sdata, peer);
                if (!sta) {
                        rcu_read_unlock();
-                       return -ENOLINK;
+                       ret = -ENOLINK;
+                       break;
                }
 
                set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
                rcu_read_unlock();
+
+               WARN_ON_ONCE(is_zero_ether_addr(sdata->tdls_peer) ||
+                            !ether_addr_equal(sdata->tdls_peer, peer));
+               ret = 0;
                break;
        case NL80211_TDLS_DISABLE_LINK:
-               return sta_info_destroy_addr(sdata, peer);
-       case NL80211_TDLS_TEARDOWN:
-       case NL80211_TDLS_SETUP:
-       case NL80211_TDLS_DISCOVERY_REQ:
-               /* We don't support in-driver setup/teardown/discovery */
-               return -ENOTSUPP;
+               /* flush a potentially queued teardown packet */
+               ieee80211_flush_queues(local, sdata);
+
+               ret = sta_info_destroy_addr(sdata, peer);
+               break;
        default:
-               return -ENOTSUPP;
+               ret = -ENOTSUPP;
+               break;
        }
 
-       return 0;
+       if (ret == 0 && ether_addr_equal(sdata->tdls_peer, peer)) {
+               cancel_delayed_work(&sdata->tdls_peer_del_work);
+               eth_zero_addr(sdata->tdls_peer);
+       }
+
+       mutex_unlock(&local->mtx);
+       return ret;
+}
+
+void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
+                                enum nl80211_tdls_operation oper,
+                                u16 reason_code, gfp_t gfp)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) {
+               sdata_err(sdata, "Discarding TDLS oper %d - not STA or disconnected\n",
+                         oper);
+               return;
+       }
+
+       cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp);
 }
+EXPORT_SYMBOL(ieee80211_tdls_oper_request);
index cfe1a0688b5ce2846eb906b3f196c5cf51a4590b..02ac535d1274217653e2f9a48c6f6e60ed6d0927 100644 (file)
@@ -1330,6 +1330,13 @@ DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
        TP_ARGS(local, sdata)
 );
 
+DEFINE_EVENT(local_sdata_evt, drv_mgd_protect_tdls_discover,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata),
+
+       TP_ARGS(local, sdata)
+);
+
 DECLARE_EVENT_CLASS(local_chanctx,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_chanctx *ctx),
index 5214686d9fd1ec9ab4bc1e2a466532bd3c829c10..865bdaf06ff16d8a94f57c79f6d11d7f84a29620 100644 (file)
@@ -250,7 +250,8 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (local->hw.conf.flags & IEEE80211_CONF_PS) {
                ieee80211_stop_queues_by_reason(&local->hw,
                                                IEEE80211_MAX_QUEUE_MAP,
-                                               IEEE80211_QUEUE_STOP_REASON_PS);
+                                               IEEE80211_QUEUE_STOP_REASON_PS,
+                                               false);
                ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
                ieee80211_queue_work(&local->hw,
                                     &local->dynamic_ps_disable_work);
@@ -469,7 +470,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                return TX_CONTINUE;
 
        if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
-                     test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
+                     test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
+                     test_sta_flag(sta, WLAN_STA_PS_DELIVER)) &&
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
 
@@ -486,7 +488,8 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                 * ahead and Tx the packet.
                 */
                if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
-                   !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+                   !test_sta_flag(sta, WLAN_STA_PS_DRIVER) &&
+                   !test_sta_flag(sta, WLAN_STA_PS_DELIVER)) {
                        spin_unlock(&sta->ps_lock);
                        return TX_CONTINUE;
                }
@@ -1618,12 +1621,12 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
 {
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_chanctx_conf *chanctx_conf;
-       struct ieee80211_channel *chan;
        struct ieee80211_radiotap_header *prthdr =
                (struct ieee80211_radiotap_header *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr;
        struct ieee80211_sub_if_data *tmp_sdata, *sdata;
+       struct cfg80211_chan_def *chandef;
        u16 len_rthdr;
        int hdrlen;
 
@@ -1721,9 +1724,9 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
        }
 
        if (chanctx_conf)
-               chan = chanctx_conf->def.chan;
+               chandef = &chanctx_conf->def;
        else if (!local->use_chanctx)
-               chan = local->_oper_chandef.chan;
+               chandef = &local->_oper_chandef;
        else
                goto fail_rcu;
 
@@ -1743,10 +1746,11 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
         * radar detection by itself. We can do that later by adding a
         * monitor flag interfaces used for AP support.
         */
-       if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)))
+       if (!cfg80211_reg_can_beacon(local->hw.wiphy, chandef,
+                                    sdata->vif.type))
                goto fail_rcu;
 
-       ieee80211_xmit(sdata, skb, chan->band);
+       ieee80211_xmit(sdata, skb, chandef->chan->band);
        rcu_read_unlock();
 
        return NETDEV_TX_OK;
@@ -1767,15 +1771,12 @@ fail:
 static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
                                              struct sk_buff *skb)
 {
-       struct timespec skb_arv;
        struct ieee80211_tx_latency_bin_ranges *tx_latency;
 
        tx_latency = rcu_dereference(local->tx_latency);
        if (!tx_latency)
                return;
-
-       ktime_get_ts(&skb_arv);
-       skb->tstamp = ktime_set(skb_arv.tv_sec, skb_arv.tv_nsec);
+       skb->tstamp = ktime_get();
 }
 
 /**
@@ -1810,7 +1811,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
        int nh_pos, h_pos;
        struct sta_info *sta = NULL;
        bool wme_sta = false, authorized = false, tdls_auth = false;
-       bool tdls_direct = false;
+       bool tdls_peer = false, tdls_setup_frame = false;
        bool multicast;
        u32 info_flags = 0;
        u16 info_id = 0;
@@ -1952,34 +1953,35 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 #endif
        case NL80211_IFTYPE_STATION:
                if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
-                       bool tdls_peer = false;
-
                        sta = sta_info_get(sdata, skb->data);
                        if (sta) {
                                authorized = test_sta_flag(sta,
                                                        WLAN_STA_AUTHORIZED);
                                wme_sta = test_sta_flag(sta, WLAN_STA_WME);
                                tdls_peer = test_sta_flag(sta,
-                                                        WLAN_STA_TDLS_PEER);
+                                                         WLAN_STA_TDLS_PEER);
                                tdls_auth = test_sta_flag(sta,
                                                WLAN_STA_TDLS_PEER_AUTH);
                        }
 
-                       /*
-                        * If the TDLS link is enabled, send everything
-                        * directly. Otherwise, allow TDLS setup frames
-                        * to be transmitted indirectly.
-                        */
-                       tdls_direct = tdls_peer && (tdls_auth ||
-                                !(ethertype == ETH_P_TDLS && skb->len > 14 &&
-                                  skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
+                       if (tdls_peer)
+                               tdls_setup_frame =
+                                       ethertype == ETH_P_TDLS &&
+                                       skb->len > 14 &&
+                                       skb->data[14] == WLAN_TDLS_SNAP_RFTYPE;
                }
 
-               if (tdls_direct) {
-                       /* link during setup - throw out frames to peer */
-                       if (!tdls_auth)
-                               goto fail_rcu;
+               /*
+                * TDLS link during setup - throw out frames to peer. We allow
+                * TDLS-setup frames to unauthorized peers for the special case
+                * of a link teardown after a TDLS sta is removed due to being
+                * unreachable.
+                */
+               if (tdls_peer && !tdls_auth && !tdls_setup_frame)
+                       goto fail_rcu;
 
+               /* send direct packets to authorized TDLS peers */
+               if (tdls_peer && tdls_auth) {
                        /* DA SA BSSID */
                        memcpy(hdr.addr1, skb->data, ETH_ALEN);
                        memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -2423,7 +2425,7 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
        u8 *beacon_data;
        size_t beacon_data_len;
        int i;
-       u8 count = sdata->csa_current_counter;
+       u8 count = beacon->csa_current_counter;
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
@@ -2442,46 +2444,53 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
+       rcu_read_lock();
        for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
-               u16 counter_offset_beacon =
-                       sdata->csa_counter_offset_beacon[i];
-               u16 counter_offset_presp = sdata->csa_counter_offset_presp[i];
+               resp = rcu_dereference(sdata->u.ap.probe_resp);
 
-               if (counter_offset_beacon) {
-                       if (WARN_ON(counter_offset_beacon >= beacon_data_len))
-                               return;
-
-                       beacon_data[counter_offset_beacon] = count;
-               }
-
-               if (sdata->vif.type == NL80211_IFTYPE_AP &&
-                   counter_offset_presp) {
-                       rcu_read_lock();
-                       resp = rcu_dereference(sdata->u.ap.probe_resp);
-
-                       /* If nl80211 accepted the offset, this should
-                        * not happen.
-                        */
-                       if (WARN_ON(!resp)) {
+               if (beacon->csa_counter_offsets[i]) {
+                       if (WARN_ON_ONCE(beacon->csa_counter_offsets[i] >=
+                                        beacon_data_len)) {
                                rcu_read_unlock();
                                return;
                        }
-                       resp->data[counter_offset_presp] = count;
-                       rcu_read_unlock();
+
+                       beacon_data[beacon->csa_counter_offsets[i]] = count;
                }
+
+               if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
+                       resp->data[resp->csa_counter_offsets[i]] = count;
        }
+       rcu_read_unlock();
 }
 
 u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct beacon_data *beacon = NULL;
+       u8 count = 0;
+
+       rcu_read_lock();
 
-       sdata->csa_current_counter--;
+       if (sdata->vif.type == NL80211_IFTYPE_AP)
+               beacon = rcu_dereference(sdata->u.ap.beacon);
+       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               beacon = rcu_dereference(sdata->u.ibss.presp);
+       else if (ieee80211_vif_is_mesh(&sdata->vif))
+               beacon = rcu_dereference(sdata->u.mesh.beacon);
+
+       if (!beacon)
+               goto unlock;
+
+       beacon->csa_current_counter--;
 
        /* the counter should never reach 0 */
-       WARN_ON(!sdata->csa_current_counter);
+       WARN_ON_ONCE(!beacon->csa_current_counter);
+       count = beacon->csa_current_counter;
 
-       return sdata->csa_current_counter;
+unlock:
+       rcu_read_unlock();
+       return count;
 }
 EXPORT_SYMBOL(ieee80211_csa_update_counter);
 
@@ -2491,7 +2500,6 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
        struct beacon_data *beacon = NULL;
        u8 *beacon_data;
        size_t beacon_data_len;
-       int counter_beacon = sdata->csa_counter_offset_beacon[0];
        int ret = false;
 
        if (!ieee80211_sdata_running(sdata))
@@ -2529,10 +2537,13 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
                goto out;
        }
 
-       if (WARN_ON(counter_beacon > beacon_data_len))
+       if (!beacon->csa_counter_offsets[0])
                goto out;
 
-       if (beacon_data[counter_beacon] == 1)
+       if (WARN_ON_ONCE(beacon->csa_counter_offsets[0] > beacon_data_len))
+               goto out;
+
+       if (beacon_data[beacon->csa_counter_offsets[0]] == 1)
                ret = true;
  out:
        rcu_read_unlock();
@@ -2548,6 +2559,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                       bool is_template)
 {
        struct ieee80211_local *local = hw_to_local(hw);
+       struct beacon_data *beacon = NULL;
        struct sk_buff *skb = NULL;
        struct ieee80211_tx_info *info;
        struct ieee80211_sub_if_data *sdata = NULL;
@@ -2569,10 +2581,10 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
 
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                struct ieee80211_if_ap *ap = &sdata->u.ap;
-               struct beacon_data *beacon = rcu_dereference(ap->beacon);
 
+               beacon = rcu_dereference(ap->beacon);
                if (beacon) {
-                       if (sdata->vif.csa_active) {
+                       if (beacon->csa_counter_offsets[0]) {
                                if (!is_template)
                                        ieee80211_csa_update_counter(vif);
 
@@ -2613,37 +2625,37 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
        } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
                struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
                struct ieee80211_hdr *hdr;
-               struct beacon_data *presp = rcu_dereference(ifibss->presp);
 
-               if (!presp)
+               beacon = rcu_dereference(ifibss->presp);
+               if (!beacon)
                        goto out;
 
-               if (sdata->vif.csa_active) {
+               if (beacon->csa_counter_offsets[0]) {
                        if (!is_template)
                                ieee80211_csa_update_counter(vif);
 
-                       ieee80211_set_csa(sdata, presp);
+                       ieee80211_set_csa(sdata, beacon);
                }
 
-               skb = dev_alloc_skb(local->tx_headroom + presp->head_len +
+               skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
                                    local->hw.extra_beacon_tailroom);
                if (!skb)
                        goto out;
                skb_reserve(skb, local->tx_headroom);
-               memcpy(skb_put(skb, presp->head_len), presp->head,
-                      presp->head_len);
+               memcpy(skb_put(skb, beacon->head_len), beacon->head,
+                      beacon->head_len);
 
                hdr = (struct ieee80211_hdr *) skb->data;
                hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
                                                 IEEE80211_STYPE_BEACON);
        } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
                struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-               struct beacon_data *bcn = rcu_dereference(ifmsh->beacon);
 
-               if (!bcn)
+               beacon = rcu_dereference(ifmsh->beacon);
+               if (!beacon)
                        goto out;
 
-               if (sdata->vif.csa_active) {
+               if (beacon->csa_counter_offsets[0]) {
                        if (!is_template)
                                /* TODO: For mesh csa_counter is in TU, so
                                 * decrementing it by one isn't correct, but
@@ -2652,40 +2664,42 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                                 */
                                ieee80211_csa_update_counter(vif);
 
-                       ieee80211_set_csa(sdata, bcn);
+                       ieee80211_set_csa(sdata, beacon);
                }
 
                if (ifmsh->sync_ops)
-                       ifmsh->sync_ops->adjust_tbtt(sdata, bcn);
+                       ifmsh->sync_ops->adjust_tbtt(sdata, beacon);
 
                skb = dev_alloc_skb(local->tx_headroom +
-                                   bcn->head_len +
+                                   beacon->head_len +
                                    256 + /* TIM IE */
-                                   bcn->tail_len +
+                                   beacon->tail_len +
                                    local->hw.extra_beacon_tailroom);
                if (!skb)
                        goto out;
                skb_reserve(skb, local->tx_headroom);
-               memcpy(skb_put(skb, bcn->head_len), bcn->head, bcn->head_len);
+               memcpy(skb_put(skb, beacon->head_len), beacon->head,
+                      beacon->head_len);
                ieee80211_beacon_add_tim(sdata, &ifmsh->ps, skb, is_template);
 
                if (offs) {
-                       offs->tim_offset = bcn->head_len;
-                       offs->tim_length = skb->len - bcn->head_len;
+                       offs->tim_offset = beacon->head_len;
+                       offs->tim_length = skb->len - beacon->head_len;
                }
 
-               memcpy(skb_put(skb, bcn->tail_len), bcn->tail, bcn->tail_len);
+               memcpy(skb_put(skb, beacon->tail_len), beacon->tail,
+                      beacon->tail_len);
        } else {
                WARN_ON(1);
                goto out;
        }
 
        /* CSA offsets */
-       if (offs) {
+       if (offs && beacon) {
                int i;
 
                for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
-                       u16 csa_off = sdata->csa_counter_offset_beacon[i];
+                       u16 csa_off = beacon->csa_counter_offsets[i];
 
                        if (!csa_off)
                                continue;
index a6cda52ed9203e55047841f1b4a62ab301ecb26e..df1bb7e16cfe8e93560eaefef4aa6d69a0b55214 100644 (file)
@@ -317,7 +317,8 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
 }
 
 static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
-                                  enum queue_stop_reason reason)
+                                  enum queue_stop_reason reason,
+                                  bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
@@ -329,7 +330,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
        if (!test_bit(reason, &local->queue_stop_reasons[queue]))
                return;
 
-       __clear_bit(reason, &local->queue_stop_reasons[queue]);
+       if (!refcounted)
+               local->q_stop_reasons[queue][reason] = 0;
+       else
+               local->q_stop_reasons[queue][reason]--;
+
+       if (local->q_stop_reasons[queue][reason] == 0)
+               __clear_bit(reason, &local->queue_stop_reasons[queue]);
 
        if (local->queue_stop_reasons[queue] != 0)
                /* someone still has this queue stopped */
@@ -344,25 +351,28 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 }
 
 void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
-                                   enum queue_stop_reason reason)
+                                   enum queue_stop_reason reason,
+                                   bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        unsigned long flags;
 
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-       __ieee80211_wake_queue(hw, queue, reason);
+       __ieee80211_wake_queue(hw, queue, reason, refcounted);
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
 void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
 {
        ieee80211_wake_queue_by_reason(hw, queue,
-                                      IEEE80211_QUEUE_STOP_REASON_DRIVER);
+                                      IEEE80211_QUEUE_STOP_REASON_DRIVER,
+                                      false);
 }
 EXPORT_SYMBOL(ieee80211_wake_queue);
 
 static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
-                                  enum queue_stop_reason reason)
+                                  enum queue_stop_reason reason,
+                                  bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_sub_if_data *sdata;
@@ -373,10 +383,13 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
        if (WARN_ON(queue >= hw->queues))
                return;
 
-       if (test_bit(reason, &local->queue_stop_reasons[queue]))
-               return;
+       if (!refcounted)
+               local->q_stop_reasons[queue][reason] = 1;
+       else
+               local->q_stop_reasons[queue][reason]++;
 
-       __set_bit(reason, &local->queue_stop_reasons[queue]);
+       if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
+               return;
 
        if (local->hw.queues < IEEE80211_NUM_ACS)
                n_acs = 1;
@@ -398,20 +411,22 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 }
 
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
-                                   enum queue_stop_reason reason)
+                                   enum queue_stop_reason reason,
+                                   bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        unsigned long flags;
 
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-       __ieee80211_stop_queue(hw, queue, reason);
+       __ieee80211_stop_queue(hw, queue, reason, refcounted);
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
 void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
 {
        ieee80211_stop_queue_by_reason(hw, queue,
-                                      IEEE80211_QUEUE_STOP_REASON_DRIVER);
+                                      IEEE80211_QUEUE_STOP_REASON_DRIVER,
+                                      false);
 }
 EXPORT_SYMBOL(ieee80211_stop_queue);
 
@@ -429,9 +444,11 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
        }
 
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-       __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+       __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+                              false);
        __skb_queue_tail(&local->pending[queue], skb);
-       __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+       __ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+                              false);
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
@@ -455,20 +472,23 @@ void ieee80211_add_pending_skbs(struct ieee80211_local *local,
                queue = info->hw_queue;
 
                __ieee80211_stop_queue(hw, queue,
-                               IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+                               IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+                               false);
 
                __skb_queue_tail(&local->pending[queue], skb);
        }
 
        for (i = 0; i < hw->queues; i++)
                __ieee80211_wake_queue(hw, i,
-                       IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
+                       IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+                       false);
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
 void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
                                     unsigned long queues,
-                                    enum queue_stop_reason reason)
+                                    enum queue_stop_reason reason,
+                                    bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        unsigned long flags;
@@ -477,7 +497,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
        for_each_set_bit(i, &queues, hw->queues)
-               __ieee80211_stop_queue(hw, i, reason);
+               __ieee80211_stop_queue(hw, i, reason, refcounted);
 
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
@@ -485,7 +505,8 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
 void ieee80211_stop_queues(struct ieee80211_hw *hw)
 {
        ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_DRIVER);
+                                       IEEE80211_QUEUE_STOP_REASON_DRIVER,
+                                       false);
 }
 EXPORT_SYMBOL(ieee80211_stop_queues);
 
@@ -508,7 +529,8 @@ EXPORT_SYMBOL(ieee80211_queue_stopped);
 
 void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
                                     unsigned long queues,
-                                    enum queue_stop_reason reason)
+                                    enum queue_stop_reason reason,
+                                    bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        unsigned long flags;
@@ -517,7 +539,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
        spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
        for_each_set_bit(i, &queues, hw->queues)
-               __ieee80211_wake_queue(hw, i, reason);
+               __ieee80211_wake_queue(hw, i, reason, refcounted);
 
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
@@ -525,17 +547,16 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
 void ieee80211_wake_queues(struct ieee80211_hw *hw)
 {
        ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_DRIVER);
+                                       IEEE80211_QUEUE_STOP_REASON_DRIVER,
+                                       false);
 }
 EXPORT_SYMBOL(ieee80211_wake_queues);
 
-void ieee80211_flush_queues(struct ieee80211_local *local,
-                           struct ieee80211_sub_if_data *sdata)
+static unsigned int
+ieee80211_get_vif_queues(struct ieee80211_local *local,
+                        struct ieee80211_sub_if_data *sdata)
 {
-       u32 queues;
-
-       if (!local->ops->flush)
-               return;
+       unsigned int queues;
 
        if (sdata && local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
                int ac;
@@ -551,13 +572,46 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
                queues = BIT(local->hw.queues) - 1;
        }
 
-       ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_FLUSH);
+       return queues;
+}
+
+void ieee80211_flush_queues(struct ieee80211_local *local,
+                           struct ieee80211_sub_if_data *sdata)
+{
+       unsigned int queues;
+
+       if (!local->ops->flush)
+               return;
+
+       queues = ieee80211_get_vif_queues(local, sdata);
+
+       ieee80211_stop_queues_by_reason(&local->hw, queues,
+                                       IEEE80211_QUEUE_STOP_REASON_FLUSH,
+                                       false);
 
        drv_flush(local, sdata, queues, false);
 
-       ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_FLUSH);
+       ieee80211_wake_queues_by_reason(&local->hw, queues,
+                                       IEEE80211_QUEUE_STOP_REASON_FLUSH,
+                                       false);
+}
+
+void ieee80211_stop_vif_queues(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata,
+                              enum queue_stop_reason reason)
+{
+       ieee80211_stop_queues_by_reason(&local->hw,
+                                       ieee80211_get_vif_queues(local, sdata),
+                                       reason, true);
+}
+
+void ieee80211_wake_vif_queues(struct ieee80211_local *local,
+                              struct ieee80211_sub_if_data *sdata,
+                              enum queue_stop_reason reason)
+{
+       ieee80211_wake_queues_by_reason(&local->hw,
+                                       ieee80211_get_vif_queues(local, sdata),
+                                       reason, true);
 }
 
 static void __iterate_active_interfaces(struct ieee80211_local *local,
@@ -1166,14 +1220,17 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
        }
 }
 
-int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
-                            size_t buffer_len, const u8 *ie, size_t ie_len,
-                            enum ieee80211_band band, u32 rate_mask,
-                            struct cfg80211_chan_def *chandef)
+static int ieee80211_build_preq_ies_band(struct ieee80211_local *local,
+                                        u8 *buffer, size_t buffer_len,
+                                        const u8 *ie, size_t ie_len,
+                                        enum ieee80211_band band,
+                                        u32 rate_mask,
+                                        struct cfg80211_chan_def *chandef,
+                                        size_t *offset)
 {
        struct ieee80211_supported_band *sband;
        u8 *pos = buffer, *end = buffer + buffer_len;
-       size_t offset = 0, noffset;
+       size_t noffset;
        int supp_rates_len, i;
        u8 rates[32];
        int num_rates;
@@ -1181,6 +1238,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
        int shift;
        u32 rate_flags;
 
+       *offset = 0;
+
        sband = local->hw.wiphy->bands[band];
        if (WARN_ON_ONCE(!sband))
                return 0;
@@ -1219,12 +1278,12 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                noffset = ieee80211_ie_split(ie, ie_len,
                                             before_extrates,
                                             ARRAY_SIZE(before_extrates),
-                                            offset);
-               if (end - pos < noffset - offset)
+                                            *offset);
+               if (end - pos < noffset - *offset)
                        goto out_err;
-               memcpy(pos, ie + offset, noffset - offset);
-               pos += noffset - offset;
-               offset = noffset;
+               memcpy(pos, ie + *offset, noffset - *offset);
+               pos += noffset - *offset;
+               *offset = noffset;
        }
 
        ext_rates_len = num_rates - supp_rates_len;
@@ -1258,12 +1317,12 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                };
                noffset = ieee80211_ie_split(ie, ie_len,
                                             before_ht, ARRAY_SIZE(before_ht),
-                                            offset);
-               if (end - pos < noffset - offset)
+                                            *offset);
+               if (end - pos < noffset - *offset)
                        goto out_err;
-               memcpy(pos, ie + offset, noffset - offset);
-               pos += noffset - offset;
-               offset = noffset;
+               memcpy(pos, ie + *offset, noffset - *offset);
+               pos += noffset - *offset;
+               *offset = noffset;
        }
 
        if (sband->ht_cap.ht_supported) {
@@ -1298,12 +1357,12 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                };
                noffset = ieee80211_ie_split(ie, ie_len,
                                             before_vht, ARRAY_SIZE(before_vht),
-                                            offset);
-               if (end - pos < noffset - offset)
+                                            *offset);
+               if (end - pos < noffset - *offset)
                        goto out_err;
-               memcpy(pos, ie + offset, noffset - offset);
-               pos += noffset - offset;
-               offset = noffset;
+               memcpy(pos, ie + *offset, noffset - *offset);
+               pos += noffset - *offset;
+               *offset = noffset;
        }
 
        if (sband->vht_cap.vht_supported) {
@@ -1313,21 +1372,54 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                                                 sband->vht_cap.cap);
        }
 
-       /* add any remaining custom IEs */
-       if (ie && ie_len) {
-               noffset = ie_len;
-               if (end - pos < noffset - offset)
-                       goto out_err;
-               memcpy(pos, ie + offset, noffset - offset);
-               pos += noffset - offset;
-       }
-
        return pos - buffer;
  out_err:
        WARN_ONCE(1, "not enough space for preq IEs\n");
        return pos - buffer;
 }
 
+int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
+                            size_t buffer_len,
+                            struct ieee80211_scan_ies *ie_desc,
+                            const u8 *ie, size_t ie_len,
+                            u8 bands_used, u32 *rate_masks,
+                            struct cfg80211_chan_def *chandef)
+{
+       size_t pos = 0, old_pos = 0, custom_ie_offset = 0;
+       int i;
+
+       memset(ie_desc, 0, sizeof(*ie_desc));
+
+       for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+               if (bands_used & BIT(i)) {
+                       pos += ieee80211_build_preq_ies_band(local,
+                                                            buffer + pos,
+                                                            buffer_len - pos,
+                                                            ie, ie_len, i,
+                                                            rate_masks[i],
+                                                            chandef,
+                                                            &custom_ie_offset);
+                       ie_desc->ies[i] = buffer + old_pos;
+                       ie_desc->len[i] = pos - old_pos;
+                       old_pos = pos;
+               }
+       }
+
+       /* add any remaining custom IEs */
+       if (ie && ie_len) {
+               if (WARN_ONCE(buffer_len - pos < ie_len - custom_ie_offset,
+                             "not enough space for preq custom IEs\n"))
+                       return pos;
+               memcpy(buffer + pos, ie + custom_ie_offset,
+                      ie_len - custom_ie_offset);
+               ie_desc->common_ies = buffer + pos;
+               ie_desc->common_ie_len = ie_len - custom_ie_offset;
+               pos += ie_len - custom_ie_offset;
+       }
+
+       return pos;
+};
+
 struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
                                          u8 *dst, u32 ratemask,
                                          struct ieee80211_channel *chan,
@@ -1340,6 +1432,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        struct sk_buff *skb;
        struct ieee80211_mgmt *mgmt;
        int ies_len;
+       u32 rate_masks[IEEE80211_NUM_BANDS] = {};
+       struct ieee80211_scan_ies dummy_ie_desc;
 
        /*
         * Do not send DS Channel parameter for directed probe requests
@@ -1357,10 +1451,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
        if (!skb)
                return NULL;
 
+       rate_masks[chan->band] = ratemask;
        ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
-                                          skb_tailroom(skb),
-                                          ie, ie_len, chan->band,
-                                          ratemask, &chandef);
+                                          skb_tailroom(skb), &dummy_ie_desc,
+                                          ie, ie_len, BIT(chan->band),
+                                          rate_masks, &chandef);
        skb_put(skb, ies_len);
 
        if (dst) {
@@ -1604,7 +1699,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        if (local->use_chanctx) {
                mutex_lock(&local->chanctx_mtx);
                list_for_each_entry(ctx, &local->chanctx_list, list)
-                       WARN_ON(drv_add_chanctx(local, ctx));
+                       if (ctx->replace_state !=
+                           IEEE80211_CHANCTX_REPLACES_OTHER)
+                               WARN_ON(drv_add_chanctx(local, ctx));
                mutex_unlock(&local->chanctx_mtx);
 
                list_for_each_entry(sdata, &local->interfaces, list) {
@@ -1798,7 +1895,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        }
 
        ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
-                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+                                       IEEE80211_QUEUE_STOP_REASON_SUSPEND,
+                                       false);
 
        /*
         * Reconfigure sched scan if it was interrupted by FW restart or
@@ -2836,6 +2934,35 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
        ps->dtim_count = dtim_count;
 }
 
+static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
+                                        struct ieee80211_chanctx *ctx)
+{
+       struct ieee80211_sub_if_data *sdata;
+       u8 radar_detect = 0;
+
+       lockdep_assert_held(&local->chanctx_mtx);
+
+       if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED))
+               return 0;
+
+       list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list)
+               if (sdata->reserved_radar_required)
+                       radar_detect |= BIT(sdata->reserved_chandef.width);
+
+       /*
+        * An in-place reservation context should not have any assigned vifs
+        * until it replaces the other context.
+        */
+       WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER &&
+               !list_empty(&ctx->assigned_vifs));
+
+       list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list)
+               if (sdata->radar_required)
+                       radar_detect |= BIT(sdata->vif.bss_conf.chandef.width);
+
+       return radar_detect;
+}
+
 int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                                 const struct cfg80211_chan_def *chandef,
                                 enum ieee80211_chanctx_mode chanmode,
@@ -2877,8 +3004,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
                num[iftype] = 1;
 
        list_for_each_entry(ctx, &local->chanctx_list, list) {
-               if (ctx->conf.radar_enabled)
-                       radar_detect |= BIT(ctx->conf.def.width);
+               if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
+                       continue;
+               radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
                if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
                        num_different_channels++;
                        continue;
@@ -2935,10 +3063,12 @@ int ieee80211_max_num_channels(struct ieee80211_local *local)
        lockdep_assert_held(&local->chanctx_mtx);
 
        list_for_each_entry(ctx, &local->chanctx_list, list) {
+               if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
+                       continue;
+
                num_different_channels++;
 
-               if (ctx->conf.radar_enabled)
-                       radar_detect |= BIT(ctx->conf.def.width);
+               radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
        }
 
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
index 6ee2b586357275aa33c13454b37d15b970cf1181..9181fb6d643786788abfed37d3ed3d5130e3cb3e 100644 (file)
@@ -271,22 +271,6 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
        return ret;
 }
 
-
-static bool ieee80211_wep_is_weak_iv(struct sk_buff *skb,
-                                    struct ieee80211_key *key)
-{
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       unsigned int hdrlen;
-       u8 *ivpos;
-       u32 iv;
-
-       hdrlen = ieee80211_hdrlen(hdr->frame_control);
-       ivpos = skb->data + hdrlen;
-       iv = (ivpos[0] << 16) | (ivpos[1] << 8) | ivpos[2];
-
-       return ieee80211_wep_weak_iv(iv, key->conf.keylen);
-}
-
 ieee80211_rx_result
 ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
 {
@@ -301,16 +285,12 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
        if (!(status->flag & RX_FLAG_DECRYPTED)) {
                if (skb_linearize(rx->skb))
                        return RX_DROP_UNUSABLE;
-               if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
-                       rx->sta->wep_weak_iv_count++;
                if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
                        return RX_DROP_UNUSABLE;
        } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
                if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) +
                                            IEEE80211_WEP_IV_LEN))
                        return RX_DROP_UNUSABLE;
-               if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
-                       rx->sta->wep_weak_iv_count++;
                ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
                /* remove ICV */
                if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
index 2cf66d885e68f38360cfb1ced1a08a924b342d22..b36b2b99657870196437b616bb71c5a12f71ebb0 100644 (file)
@@ -143,6 +143,7 @@ static void
 mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
 {
        struct mac802154_sub_if_data *sdata;
+
        ASSERT_RTNL();
 
        sdata = netdev_priv(dev);
@@ -166,11 +167,13 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
        switch (type) {
        case IEEE802154_DEV_MONITOR:
                dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
-                                  name, mac802154_monitor_setup);
+                                  name, NET_NAME_UNKNOWN,
+                                  mac802154_monitor_setup);
                break;
        case IEEE802154_DEV_WPAN:
                dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
-                                  name, mac802154_wpan_setup);
+                                  name, NET_NAME_UNKNOWN,
+                                  mac802154_wpan_setup);
                break;
        default:
                dev = NULL;
@@ -276,7 +279,8 @@ ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
        }
 
        priv = wpan_phy_priv(phy);
-       priv->hw.phy = priv->phy = phy;
+       priv->phy = phy;
+       priv->hw.phy = priv->phy;
        priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
        priv->ops = ops;
 
@@ -302,29 +306,61 @@ EXPORT_SYMBOL(ieee802154_free_device);
 int ieee802154_register_device(struct ieee802154_dev *dev)
 {
        struct mac802154_priv *priv = mac802154_to_priv(dev);
-       int rc = -ENOMEM;
+       int rc = -ENOSYS;
+
+       if (dev->flags & IEEE802154_HW_TXPOWER) {
+               if (!priv->ops->set_txpower)
+                       goto out;
+
+               priv->phy->set_txpower = mac802154_set_txpower;
+       }
+
+       if (dev->flags & IEEE802154_HW_LBT) {
+               if (!priv->ops->set_lbt)
+                       goto out;
+
+               priv->phy->set_lbt = mac802154_set_lbt;
+       }
+
+       if (dev->flags & IEEE802154_HW_CCA_MODE) {
+               if (!priv->ops->set_cca_mode)
+                       goto out;
+
+               priv->phy->set_cca_mode = mac802154_set_cca_mode;
+       }
+
+       if (dev->flags & IEEE802154_HW_CCA_ED_LEVEL) {
+               if (!priv->ops->set_cca_ed_level)
+                       goto out;
+
+               priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
+       }
+
+       if (dev->flags & IEEE802154_HW_CSMA_PARAMS) {
+               if (!priv->ops->set_csma_params)
+                       goto out;
+
+               priv->phy->set_csma_params = mac802154_set_csma_params;
+       }
+
+       if (dev->flags & IEEE802154_HW_FRAME_RETRIES) {
+               if (!priv->ops->set_frame_retries)
+                       goto out;
+
+               priv->phy->set_frame_retries = mac802154_set_frame_retries;
+       }
 
        priv->dev_workqueue =
                create_singlethread_workqueue(wpan_phy_name(priv->phy));
-       if (!priv->dev_workqueue)
+       if (!priv->dev_workqueue) {
+               rc = -ENOMEM;
                goto out;
+       }
 
        wpan_phy_set_dev(priv->phy, priv->hw.parent);
 
        priv->phy->add_iface = mac802154_add_iface;
        priv->phy->del_iface = mac802154_del_iface;
-       if (priv->ops->set_txpower)
-               priv->phy->set_txpower = mac802154_set_txpower;
-       if (priv->ops->set_lbt)
-               priv->phy->set_lbt = mac802154_set_lbt;
-       if (priv->ops->set_cca_mode)
-               priv->phy->set_cca_mode = mac802154_set_cca_mode;
-       if (priv->ops->set_cca_ed_level)
-               priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
-       if (priv->ops->set_csma_params)
-               priv->phy->set_csma_params = mac802154_set_csma_params;
-       if (priv->ops->set_frame_retries)
-               priv->phy->set_frame_retries = mac802154_set_frame_retries;
 
        rc = wpan_phy_register(priv->phy);
        if (rc < 0)
index 1456f73b02b9f8ae92909d84ee408d0c6ca93987..457058142098376bb9731600a9c162aaf4247b64 100644 (file)
@@ -538,6 +538,7 @@ static int llsec_recover_addr(struct mac802154_llsec *sec,
                              struct ieee802154_addr *addr)
 {
        __le16 caddr = sec->params.coord_shortaddr;
+
        addr->pan_id = sec->params.pan_id;
 
        if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
index 15aa2f2b03a78c29138db43c08073c4ba2817e54..868a040fd422ea6e877cc7df036b425f8b62008e 100644 (file)
@@ -175,9 +175,9 @@ static void phy_chan_notify(struct work_struct *work)
 
        mutex_lock(&priv->hw->phy->pib_lock);
        res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
-       if (res)
+       if (res) {
                pr_debug("set_channel failed\n");
-       else {
+       } else {
                priv->hw->phy->current_channel = priv->chan;
                priv->hw->phy->current_page = priv->page;
        }
@@ -210,8 +210,9 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
                INIT_WORK(&work->work, phy_chan_notify);
                work->dev = dev;
                queue_work(priv->hw->dev_workqueue, &work->work);
-       } else
+       } else {
                mutex_unlock(&priv->hw->phy->pib_lock);
+       }
 }
 
 
index 6d1647399d4fefb4584a7bdd2c8ac5adf9b05702..8124353646ae64239556a0ed915f046f7e689864 100644 (file)
@@ -98,6 +98,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
                u8 *data = skb_put(skb, 2);
+
                data[0] = crc & 0xff;
                data[1] = crc >> 8;
        }
index e9410d17619df52b76620e6a81a51e3c9a549f33..ad751fe2e82b8ca017bc83f07d3dd7e9f04229f5 100644 (file)
@@ -46,6 +46,9 @@ config NF_CONNTRACK
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_LOG_COMMON
+       tristate
+
 if NF_CONNTRACK
 
 config NF_CONNTRACK_MARK
@@ -744,6 +747,7 @@ config NETFILTER_XT_TARGET_LED
 
 config NETFILTER_XT_TARGET_LOG
        tristate "LOG target support"
+       depends on NF_LOG_IPV4 && NF_LOG_IPV6
        default m if NETFILTER_ADVANCED=n
        help
          This option adds a `LOG' target, which allows you to create rules in
index bffdad774da753131937d53ba1693af8f25b83a0..8308624a406ac0d891338e73c44abe6a5fa4c8b0 100644 (file)
@@ -47,6 +47,9 @@ obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 nf_nat-y       := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
                   nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
 
+# generic transport layer logging
+obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
+
 obj-$(CONFIG_NF_NAT) += nf_nat.o
 
 # NAT protocols (nf_nat)
index 581a6584ed0c651f3bfc7983f704aaeca5203e09..8416307fdd1d431e5f306efdedaf8682c36fa62b 100644 (file)
@@ -1806,92 +1806,6 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-#endif
-#if 0
-       {
-               .procname       = "timeout_established",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synsent",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synrecv",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_finwait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_timewait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_close",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_closewait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_lastack",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_listen",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synack",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_udp",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_icmp",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        { }
 };
index db801263ee9fa81a832f4656071198cdc16110e7..eadffb29dec0cccf8333a6f446d3756d843ec4ee 100644 (file)
@@ -886,8 +886,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
                rcu_read_unlock();
                if (!cp) {
-                       if (param->pe_data)
-                               kfree(param->pe_data);
+                       kfree(param->pe_data);
                        IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
                        return;
                }
index 1f4f954c4b47c7ecf763290659e900277ad5cb89..de88c4ab5146a168bc0866f3fdc07098b5ebe543 100644 (file)
@@ -352,40 +352,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        local_bh_enable();
 }
 
-static void death_by_event(unsigned long ul_conntrack)
-{
-       struct nf_conn *ct = (void *)ul_conntrack;
-       struct net *net = nf_ct_net(ct);
-       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
-
-       BUG_ON(ecache == NULL);
-
-       if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
-               /* bad luck, let's retry again */
-               ecache->timeout.expires = jiffies +
-                       (prandom_u32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ecache->timeout);
-               return;
-       }
-       /* we've got the event delivered, now it's dying */
-       set_bit(IPS_DYING_BIT, &ct->status);
-       nf_ct_put(ct);
-}
-
-static void nf_ct_dying_timeout(struct nf_conn *ct)
-{
-       struct net *net = nf_ct_net(ct);
-       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
-
-       BUG_ON(ecache == NULL);
-
-       /* set a new timer to retry event delivery */
-       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
-       ecache->timeout.expires = jiffies +
-               (prandom_u32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ecache->timeout);
-}
-
 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 {
        struct nf_conn_tstamp *tstamp;
@@ -394,15 +360,20 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_to_ns(ktime_get_real());
 
-       if (!nf_ct_is_dying(ct) &&
-           unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
-           portid, report) < 0)) {
+       if (nf_ct_is_dying(ct))
+               goto delete;
+
+       if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+                                   portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
-               nf_ct_dying_timeout(ct);
+               nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }
+
+       nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
+ delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
@@ -1464,26 +1435,6 @@ void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
-static void nf_ct_release_dying_list(struct net *net)
-{
-       struct nf_conntrack_tuple_hash *h;
-       struct nf_conn *ct;
-       struct hlist_nulls_node *n;
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-               spin_lock_bh(&pcpu->lock);
-               hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
-                       ct = nf_ct_tuplehash_to_ctrack(h);
-                       /* never fails to remove them, no listeners at this point */
-                       nf_ct_kill(ct);
-               }
-               spin_unlock_bh(&pcpu->lock);
-       }
-}
-
 static int untrack_refs(void)
 {
        int cnt = 0, cpu;
@@ -1548,7 +1499,6 @@ i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
-               nf_ct_release_dying_list(net);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
        }
index 1df176146567aba5fbf922794022e04c0f93a83f..4e78c57b818f7d2cc387c793fe5a100e03fe067e 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
+#define ECACHE_RETRY_WAIT (HZ/10)
+
+enum retry_state {
+       STATE_CONGESTED,
+       STATE_RESTART,
+       STATE_DONE,
+};
+
+static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
+{
+       struct nf_conn *refs[16];
+       struct nf_conntrack_tuple_hash *h;
+       struct hlist_nulls_node *n;
+       unsigned int evicted = 0;
+       enum retry_state ret = STATE_DONE;
+
+       spin_lock(&pcpu->lock);
+
+       hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+               struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+               if (nf_ct_is_dying(ct))
+                       continue;
+
+               if (nf_conntrack_event(IPCT_DESTROY, ct)) {
+                       ret = STATE_CONGESTED;
+                       break;
+               }
+
+               /* we've got the event delivered, now it's dying */
+               set_bit(IPS_DYING_BIT, &ct->status);
+               refs[evicted] = ct;
+
+               if (++evicted >= ARRAY_SIZE(refs)) {
+                       ret = STATE_RESTART;
+                       break;
+               }
+       }
+
+       spin_unlock(&pcpu->lock);
+
+       /* can't _put while holding lock */
+       while (evicted)
+               nf_ct_put(refs[--evicted]);
+
+       return ret;
+}
+
+static void ecache_work(struct work_struct *work)
+{
+       struct netns_ct *ctnet =
+               container_of(work, struct netns_ct, ecache_dwork.work);
+       int cpu, delay = -1;
+       struct ct_pcpu *pcpu;
+
+       local_bh_disable();
+
+       for_each_possible_cpu(cpu) {
+               enum retry_state ret;
+
+               pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
+
+               ret = ecache_work_evict_list(pcpu);
+
+               switch (ret) {
+               case STATE_CONGESTED:
+                       delay = ECACHE_RETRY_WAIT;
+                       goto out;
+               case STATE_RESTART:
+                       delay = 0;
+                       break;
+               case STATE_DONE:
+                       break;
+               }
+       }
+
+ out:
+       local_bh_enable();
+
+       ctnet->ecache_dwork_pending = delay > 0;
+       if (delay >= 0)
+               schedule_delayed_work(&ctnet->ecache_dwork, delay);
+}
+
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
@@ -157,7 +241,6 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
 #define NF_CT_EVENTS_DEFAULT 1
 static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
-static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table event_sysctl_table[] = {
@@ -168,13 +251,6 @@ static struct ctl_table event_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "nf_conntrack_events_retry_timeout",
-               .data           = &init_net.ct.sysctl_events_retry_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
        {}
 };
 #endif /* CONFIG_SYSCTL */
@@ -196,7 +272,6 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
                goto out;
 
        table[0].data = &net->ct.sysctl_events;
-       table[1].data = &net->ct.sysctl_events_retry_timeout;
 
        /* Don't export sysctls to unprivileged users */
        if (net->user_ns != &init_user_ns)
@@ -238,12 +313,13 @@ static void nf_conntrack_event_fini_sysctl(struct net *net)
 int nf_conntrack_ecache_pernet_init(struct net *net)
 {
        net->ct.sysctl_events = nf_ct_events;
-       net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
+       INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
        return nf_conntrack_event_init_sysctl(net);
 }
 
 void nf_conntrack_ecache_pernet_fini(struct net *net)
 {
+       cancel_delayed_work_sync(&net->ct.ecache_dwork);
        nf_conntrack_event_fini_sysctl(net);
 }
 
index 300ed1eec72942a64147ed15f422cd2e0b8c89ed..355a5c4ef7635bb7aca1e1068ca269b891949550 100644 (file)
@@ -745,8 +745,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
 {
        if (cb->args[1])
                nf_ct_put((struct nf_conn *)cb->args[1]);
-       if (cb->data)
-               kfree(cb->data);
+       kfree(cb->data);
        return 0;
 }
 
index 85296d4eac0e56c69052bbdab75269f3c7fd333e..daad6022c689c47a66a47e7a89a83c0c848c53d6 100644 (file)
 #define NF_LOG_PREFIXLEN               128
 #define NFLOGGER_NAME_LEN              64
 
-static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
+static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
 static DEFINE_MUTEX(nf_log_mutex);
 
 static struct nf_logger *__find_logger(int pf, const char *str_logger)
 {
-       struct nf_logger *t;
+       struct nf_logger *log;
+       int i;
+
+       for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
+               if (loggers[pf][i] == NULL)
+                       continue;
 
-       list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) {
-               if (!strnicmp(str_logger, t->name, strlen(t->name)))
-                       return t;
+               log = rcu_dereference_protected(loggers[pf][i],
+                                               lockdep_is_held(&nf_log_mutex));
+               if (!strnicmp(str_logger, log->name, strlen(log->name)))
+                       return log;
        }
 
        return NULL;
@@ -73,17 +79,14 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
        if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
                return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(logger->list); i++)
-               INIT_LIST_HEAD(&logger->list[i]);
-
        mutex_lock(&nf_log_mutex);
 
        if (pf == NFPROTO_UNSPEC) {
                for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
-                       list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
+                       rcu_assign_pointer(loggers[i][logger->type], logger);
        } else {
                /* register at end of list to honor first register win */
-               list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
+               rcu_assign_pointer(loggers[pf][logger->type], logger);
        }
 
        mutex_unlock(&nf_log_mutex);
@@ -98,7 +101,7 @@ void nf_log_unregister(struct nf_logger *logger)
 
        mutex_lock(&nf_log_mutex);
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
-               list_del(&logger->list[i]);
+               RCU_INIT_POINTER(loggers[i][logger->type], NULL);
        mutex_unlock(&nf_log_mutex);
 }
 EXPORT_SYMBOL(nf_log_unregister);
@@ -129,6 +132,48 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf)
 }
 EXPORT_SYMBOL(nf_log_unbind_pf);
 
+void nf_logger_request_module(int pf, enum nf_log_type type)
+{
+       if (loggers[pf][type] == NULL)
+               request_module("nf-logger-%u-%u", pf, type);
+}
+EXPORT_SYMBOL_GPL(nf_logger_request_module);
+
+int nf_logger_find_get(int pf, enum nf_log_type type)
+{
+       struct nf_logger *logger;
+       int ret = -ENOENT;
+
+       logger = loggers[pf][type];
+       if (logger == NULL)
+               request_module("nf-logger-%u-%u", pf, type);
+
+       rcu_read_lock();
+       logger = rcu_dereference(loggers[pf][type]);
+       if (logger == NULL)
+               goto out;
+
+       if (logger && try_module_get(logger->me))
+               ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nf_logger_find_get);
+
+void nf_logger_put(int pf, enum nf_log_type type)
+{
+       struct nf_logger *logger;
+
+       BUG_ON(loggers[pf][type] == NULL);
+
+       rcu_read_lock();
+       logger = rcu_dereference(loggers[pf][type]);
+       module_put(logger->me);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_logger_put);
+
 void nf_log_packet(struct net *net,
                   u_int8_t pf,
                   unsigned int hooknum,
@@ -143,7 +188,11 @@ void nf_log_packet(struct net *net,
        const struct nf_logger *logger;
 
        rcu_read_lock();
-       logger = rcu_dereference(net->nf.nf_loggers[pf]);
+       if (loginfo != NULL)
+               logger = rcu_dereference(loggers[pf][loginfo->type]);
+       else
+               logger = rcu_dereference(net->nf.nf_loggers[pf]);
+
        if (logger) {
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
@@ -154,6 +203,63 @@ void nf_log_packet(struct net *net,
 }
 EXPORT_SYMBOL(nf_log_packet);
 
+#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
+
+struct nf_log_buf {
+       unsigned int    count;
+       char            buf[S_SIZE + 1];
+};
+static struct nf_log_buf emergency, *emergency_ptr = &emergency;
+
+__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...)
+{
+       va_list args;
+       int len;
+
+       if (likely(m->count < S_SIZE)) {
+               va_start(args, f);
+               len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
+               va_end(args);
+               if (likely(m->count + len < S_SIZE)) {
+                       m->count += len;
+                       return 0;
+               }
+       }
+       m->count = S_SIZE;
+       printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
+       return -1;
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_add);
+
+struct nf_log_buf *nf_log_buf_open(void)
+{
+       struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC);
+
+       if (unlikely(!m)) {
+               local_bh_disable();
+               do {
+                       m = xchg(&emergency_ptr, NULL);
+               } while (!m);
+       }
+       m->count = 0;
+       return m;
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_open);
+
+void nf_log_buf_close(struct nf_log_buf *m)
+{
+       m->buf[m->count] = 0;
+       printk("%s\n", m->buf);
+
+       if (likely(m != &emergency))
+               kfree(m);
+       else {
+               emergency_ptr = m;
+               local_bh_enable();
+       }
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_close);
+
 #ifdef CONFIG_PROC_FS
 static void *seq_start(struct seq_file *seq, loff_t *pos)
 {
@@ -188,8 +294,7 @@ static int seq_show(struct seq_file *s, void *v)
 {
        loff_t *pos = v;
        const struct nf_logger *logger;
-       struct nf_logger *t;
-       int ret;
+       int i, ret;
        struct net *net = seq_file_net(s);
 
        logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
@@ -203,11 +308,16 @@ static int seq_show(struct seq_file *s, void *v)
        if (ret < 0)
                return ret;
 
-       list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
-               ret = seq_printf(s, "%s", t->name);
+       for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
+               if (loggers[*pos][i] == NULL)
+                       continue;
+
+               logger = rcu_dereference_protected(loggers[*pos][i],
+                                          lockdep_is_held(&nf_log_mutex));
+               ret = seq_printf(s, "%s", logger->name);
                if (ret < 0)
                        return ret;
-               if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
+               if (i == 0 && loggers[*pos][i + 1] != NULL) {
                        ret = seq_printf(s, ",");
                        if (ret < 0)
                                return ret;
@@ -389,14 +499,5 @@ static struct pernet_operations nf_log_net_ops = {
 
 int __init netfilter_log_init(void)
 {
-       int i, ret;
-
-       ret = register_pernet_subsys(&nf_log_net_ops);
-       if (ret < 0)
-               return ret;
-
-       for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
-               INIT_LIST_HEAD(&(nf_loggers_l[i]));
-
-       return 0;
+       return register_pernet_subsys(&nf_log_net_ops);
 }
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
new file mode 100644 (file)
index 0000000..eeb8ef4
--- /dev/null
@@ -0,0 +1,187 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset)
+{
+       struct udphdr _udph;
+       const struct udphdr *uh;
+
+       if (proto == IPPROTO_UDP)
+               /* Max length: 10 "PROTO=UDP "     */
+               nf_log_buf_add(m, "PROTO=UDP ");
+       else    /* Max length: 14 "PROTO=UDPLITE " */
+               nf_log_buf_add(m, "PROTO=UDPLITE ");
+
+       if (fragment)
+               goto out;
+
+       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+       uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+       if (uh == NULL) {
+               nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+
+               return 1;
+       }
+
+       /* Max length: 20 "SPT=65535 DPT=65535 " */
+       nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ",
+                      ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len));
+
+out:
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_udp_header);
+
+int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset,
+                          unsigned int logflags)
+{
+       struct tcphdr _tcph;
+       const struct tcphdr *th;
+
+       /* Max length: 10 "PROTO=TCP " */
+       nf_log_buf_add(m, "PROTO=TCP ");
+
+       if (fragment)
+               return 0;
+
+       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+       th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+       if (th == NULL) {
+               nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+               return 1;
+       }
+
+       /* Max length: 20 "SPT=65535 DPT=65535 " */
+       nf_log_buf_add(m, "SPT=%u DPT=%u ",
+                      ntohs(th->source), ntohs(th->dest));
+       /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
+       if (logflags & XT_LOG_TCPSEQ) {
+               nf_log_buf_add(m, "SEQ=%u ACK=%u ",
+                              ntohl(th->seq), ntohl(th->ack_seq));
+       }
+
+       /* Max length: 13 "WINDOW=65535 " */
+       nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window));
+       /* Max length: 9 "RES=0x3C " */
+       nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
+                                           TCP_RESERVED_BITS) >> 22));
+       /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
+       if (th->cwr)
+               nf_log_buf_add(m, "CWR ");
+       if (th->ece)
+               nf_log_buf_add(m, "ECE ");
+       if (th->urg)
+               nf_log_buf_add(m, "URG ");
+       if (th->ack)
+               nf_log_buf_add(m, "ACK ");
+       if (th->psh)
+               nf_log_buf_add(m, "PSH ");
+       if (th->rst)
+               nf_log_buf_add(m, "RST ");
+       if (th->syn)
+               nf_log_buf_add(m, "SYN ");
+       if (th->fin)
+               nf_log_buf_add(m, "FIN ");
+       /* Max length: 11 "URGP=65535 " */
+       nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr));
+
+       if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
+               u_int8_t _opt[60 - sizeof(struct tcphdr)];
+               const u_int8_t *op;
+               unsigned int i;
+               unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
+
+               op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
+                                       optsize, _opt);
+               if (op == NULL) {
+                       nf_log_buf_add(m, "OPT (TRUNCATED)");
+                       return 1;
+               }
+
+               /* Max length: 127 "OPT (" 15*4*2chars ") " */
+               nf_log_buf_add(m, "OPT (");
+               for (i = 0; i < optsize; i++)
+                       nf_log_buf_add(m, "%02X", op[i]);
+
+               nf_log_buf_add(m, ") ");
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
+
+void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
+{
+       if (!sk || sk->sk_state == TCP_TIME_WAIT)
+               return;
+
+       read_lock_bh(&sk->sk_callback_lock);
+       if (sk->sk_socket && sk->sk_socket->file) {
+               const struct cred *cred = sk->sk_socket->file->f_cred;
+               nf_log_buf_add(m, "UID=%u GID=%u ",
+                       from_kuid_munged(&init_user_ns, cred->fsuid),
+                       from_kgid_munged(&init_user_ns, cred->fsgid));
+       }
+       read_unlock_bh(&sk->sk_callback_lock);
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_sk_uid_gid);
+
+void
+nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+                         unsigned int hooknum, const struct sk_buff *skb,
+                         const struct net_device *in,
+                         const struct net_device *out,
+                         const struct nf_loginfo *loginfo, const char *prefix)
+{
+       nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
+              '0' + loginfo->u.log.level, prefix,
+              in ? in->name : "",
+              out ? out->name : "");
+#ifdef CONFIG_BRIDGE_NETFILTER
+       if (skb->nf_bridge) {
+               const struct net_device *physindev;
+               const struct net_device *physoutdev;
+
+               physindev = skb->nf_bridge->physindev;
+               if (physindev && in != physindev)
+                       nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
+               physoutdev = skb->nf_bridge->physoutdev;
+               if (physoutdev && out != physoutdev)
+                       nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
+       }
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+
+static int __init nf_log_common_init(void)
+{
+       return 0;
+}
+
+static void __exit nf_log_common_exit(void) {}
+
+module_init(nf_log_common_init);
+module_exit(nf_log_common_exit);
+
+MODULE_LICENSE("GPL");
index a49907b1dabc973a0714712ffea5605251afbf71..552f97cd9fde5c510ac055642affb66adbda181d 100644 (file)
@@ -710,7 +710,7 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
        .flags          = NF_CT_EXT_F_PREALLOC,
 };
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
index 83a72a235cae7c6b9ee3c97dcabb8629fe6e7008..fbce552a796e14a6249e6e9a5d0d2ea16aaf3f09 100644 (file)
@@ -95,7 +95,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 }
 EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
                                   struct nf_nat_range *range)
 {
index c8be2cdac0bffebe77a8f9bb5fef0ac35c6f2de0..b8067b53ff3a8579e9ba126288c65ee89702fbac 100644 (file)
@@ -78,7 +78,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
        .manip_pkt              = dccp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = dccp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 754536f2c67488acd4345cdcec3740916ab24fea..cbc7ade1487b2d779d56e874518406e4a80d0e53 100644 (file)
@@ -59,7 +59,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
        .manip_pkt              = sctp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = sctp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 83ec8a6e4c36775f3041cedf31419d937ac077bb..37f5505f4529be54f45ef773088ca7aadbbccaed 100644 (file)
@@ -79,7 +79,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
        .manip_pkt              = tcp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = tcp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 7df613fb34a23d29af3b5d1775182ae38e86ded3..b0ede2f0d8bcbd0c7ee156cec4c73a432b9438cd 100644 (file)
@@ -70,7 +70,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_udp = {
        .manip_pkt              = udp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 776a0d1317b16df837b5017fc2020e99a222b167..368f14e01e758d9771534eee7744d5d5a66cf8ba 100644 (file)
@@ -69,7 +69,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
        .manip_pkt              = udplite_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udplite_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index d292c8d286ebeac22f688d47df8b793e9c90cc6b..a11c5ff2f720187418e22ed602119651672bb717 100644 (file)
@@ -773,6 +773,7 @@ nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
 
 static struct nf_logger nfulnl_logger __read_mostly = {
        .name   = "nfnetlink_log",
+       .type   = NF_LOG_TYPE_ULOG,
        .logfn  = &nfulnl_log_packet,
        .me     = THIS_MODULE,
 };
@@ -1105,6 +1106,9 @@ MODULE_DESCRIPTION("netfilter userspace logging");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
+MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
+MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
+MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
 
 module_init(nfnetlink_log_init);
 module_exit(nfnetlink_log_fini);
index 10cfb156cdf4449dee49e53667549e5c2fccdf82..bde05f28cf14782b3b9f11905fbed37ea8644b95 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2014 Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -41,6 +42,8 @@ static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
        [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
        [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
        [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
+       [NFTA_LOG_LEVEL]        = { .type = NLA_U32 },
+       [NFTA_LOG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static int nft_log_init(const struct nft_ctx *ctx,
@@ -50,6 +53,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
        struct nft_log *priv = nft_expr_priv(expr);
        struct nf_loginfo *li = &priv->loginfo;
        const struct nlattr *nla;
+       int ret;
 
        nla = tb[NFTA_LOG_PREFIX];
        if (nla != NULL) {
@@ -57,30 +61,74 @@ static int nft_log_init(const struct nft_ctx *ctx,
                if (priv->prefix == NULL)
                        return -ENOMEM;
                nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
-       } else
+       } else {
                priv->prefix = (char *)nft_log_null_prefix;
+       }
 
-       li->type = NF_LOG_TYPE_ULOG;
+       li->type = NF_LOG_TYPE_LOG;
+       if (tb[NFTA_LOG_LEVEL] != NULL &&
+           tb[NFTA_LOG_GROUP] != NULL)
+               return -EINVAL;
        if (tb[NFTA_LOG_GROUP] != NULL)
+               li->type = NF_LOG_TYPE_ULOG;
+
+       switch (li->type) {
+       case NF_LOG_TYPE_LOG:
+               if (tb[NFTA_LOG_LEVEL] != NULL) {
+                       li->u.log.level =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
+               } else {
+                       li->u.log.level = 4;
+               }
+               if (tb[NFTA_LOG_FLAGS] != NULL) {
+                       li->u.log.logflags =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
+               }
+               break;
+       case NF_LOG_TYPE_ULOG:
                li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+               if (tb[NFTA_LOG_SNAPLEN] != NULL) {
+                       li->u.ulog.copy_len =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+               }
+               if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+                       li->u.ulog.qthreshold =
+                               ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+               }
+               break;
+       }
 
-       if (tb[NFTA_LOG_SNAPLEN] != NULL)
-               li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
-       if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
-               li->u.ulog.qthreshold =
-                       ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+       if (ctx->afi->family == NFPROTO_INET) {
+               ret = nf_logger_find_get(NFPROTO_IPV4, li->type);
+               if (ret < 0)
+                       return ret;
+
+               ret = nf_logger_find_get(NFPROTO_IPV6, li->type);
+               if (ret < 0) {
+                       nf_logger_put(NFPROTO_IPV4, li->type);
+                       return ret;
+               }
+               return 0;
        }
 
-       return 0;
+       return nf_logger_find_get(ctx->afi->family, li->type);
 }
 
 static void nft_log_destroy(const struct nft_ctx *ctx,
                            const struct nft_expr *expr)
 {
        struct nft_log *priv = nft_expr_priv(expr);
+       struct nf_loginfo *li = &priv->loginfo;
 
        if (priv->prefix != nft_log_null_prefix)
                kfree(priv->prefix);
+
+       if (ctx->afi->family == NFPROTO_INET) {
+               nf_logger_put(NFPROTO_IPV4, li->type);
+               nf_logger_put(NFPROTO_IPV6, li->type);
+       } else {
+               nf_logger_put(ctx->afi->family, li->type);
+       }
 }
 
 static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -91,17 +139,33 @@ static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (priv->prefix != nft_log_null_prefix)
                if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
                        goto nla_put_failure;
-       if (li->u.ulog.group)
-               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
-                       goto nla_put_failure;
-       if (li->u.ulog.copy_len)
-               if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
-                                htonl(li->u.ulog.copy_len)))
+       switch (li->type) {
+       case NF_LOG_TYPE_LOG:
+               if (nla_put_be32(skb, NFTA_LOG_LEVEL, htonl(li->u.log.level)))
                        goto nla_put_failure;
-       if (li->u.ulog.qthreshold)
-               if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
-                                htons(li->u.ulog.qthreshold)))
+
+               if (li->u.log.logflags) {
+                       if (nla_put_be32(skb, NFTA_LOG_FLAGS,
+                                        htonl(li->u.log.logflags)))
+                               goto nla_put_failure;
+               }
+               break;
+       case NF_LOG_TYPE_ULOG:
+               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
                        goto nla_put_failure;
+
+               if (li->u.ulog.copy_len) {
+                       if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+                                        htonl(li->u.ulog.copy_len)))
+                               goto nla_put_failure;
+               }
+               if (li->u.ulog.qthreshold) {
+                       if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+                                        htons(li->u.ulog.qthreshold)))
+                               goto nla_put_failure;
+               }
+               break;
+       }
        return 0;
 
 nla_put_failure:
index 227aa11e8409bb18be93c0c432a4d71cfb6e8f38..47b978bc310039626232525ee08849df56e909ca 100644 (file)
@@ -711,28 +711,15 @@ void xt_free_table_info(struct xt_table_info *info)
 {
        int cpu;
 
-       for_each_possible_cpu(cpu) {
-               if (info->size <= PAGE_SIZE)
-                       kfree(info->entries[cpu]);
-               else
-                       vfree(info->entries[cpu]);
-       }
+       for_each_possible_cpu(cpu)
+               kvfree(info->entries[cpu]);
 
        if (info->jumpstack != NULL) {
-               if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
-                       for_each_possible_cpu(cpu)
-                               vfree(info->jumpstack[cpu]);
-               } else {
-                       for_each_possible_cpu(cpu)
-                               kfree(info->jumpstack[cpu]);
-               }
+               for_each_possible_cpu(cpu)
+                       kvfree(info->jumpstack[cpu]);
+               kvfree(info->jumpstack);
        }
 
-       if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
-               vfree(info->jumpstack);
-       else
-               kfree(info->jumpstack);
-
        free_percpu(info->stackptr);
 
        kfree(info);
index 5ab24843370a0b452e8647b4b0a99d54c96d6087..c13b79440ede6887588ed10144c7730cb8265b3d 100644 (file)
 #include <linux/netfilter/xt_LOG.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/nf_log.h>
-#include <net/netfilter/xt_log.h>
-
-static struct nf_loginfo default_loginfo = {
-       .type   = NF_LOG_TYPE_LOG,
-       .u = {
-               .log = {
-                       .level    = 5,
-                       .logflags = NF_LOG_MASK,
-               },
-       },
-};
-
-static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
-                          u8 proto, int fragment, unsigned int offset)
-{
-       struct udphdr _udph;
-       const struct udphdr *uh;
-
-       if (proto == IPPROTO_UDP)
-               /* Max length: 10 "PROTO=UDP "     */
-               sb_add(m, "PROTO=UDP ");
-       else    /* Max length: 14 "PROTO=UDPLITE " */
-               sb_add(m, "PROTO=UDPLITE ");
-
-       if (fragment)
-               goto out;
-
-       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-       uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
-       if (uh == NULL) {
-               sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
-
-               return 1;
-       }
-
-       /* Max length: 20 "SPT=65535 DPT=65535 " */
-       sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
-               ntohs(uh->len));
-
-out:
-       return 0;
-}
-
-static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
-                          u8 proto, int fragment, unsigned int offset,
-                          unsigned int logflags)
-{
-       struct tcphdr _tcph;
-       const struct tcphdr *th;
-
-       /* Max length: 10 "PROTO=TCP " */
-       sb_add(m, "PROTO=TCP ");
-
-       if (fragment)
-               return 0;
-
-       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-       th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
-               return 1;
-       }
-
-       /* Max length: 20 "SPT=65535 DPT=65535 " */
-       sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
-       /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
-       if (logflags & XT_LOG_TCPSEQ)
-               sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
-
-       /* Max length: 13 "WINDOW=65535 " */
-       sb_add(m, "WINDOW=%u ", ntohs(th->window));
-       /* Max length: 9 "RES=0x3C " */
-       sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
-                                           TCP_RESERVED_BITS) >> 22));
-       /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
-       if (th->cwr)
-               sb_add(m, "CWR ");
-       if (th->ece)
-               sb_add(m, "ECE ");
-       if (th->urg)
-               sb_add(m, "URG ");
-       if (th->ack)
-               sb_add(m, "ACK ");
-       if (th->psh)
-               sb_add(m, "PSH ");
-       if (th->rst)
-               sb_add(m, "RST ");
-       if (th->syn)
-               sb_add(m, "SYN ");
-       if (th->fin)
-               sb_add(m, "FIN ");
-       /* Max length: 11 "URGP=65535 " */
-       sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
-
-       if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
-               u_int8_t _opt[60 - sizeof(struct tcphdr)];
-               const u_int8_t *op;
-               unsigned int i;
-               unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
-
-               op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
-                                       optsize, _opt);
-               if (op == NULL) {
-                       sb_add(m, "OPT (TRUNCATED)");
-                       return 1;
-               }
-
-               /* Max length: 127 "OPT (" 15*4*2chars ") " */
-               sb_add(m, "OPT (");
-               for (i = 0; i < optsize; i++)
-                       sb_add(m, "%02X", op[i]);
-
-               sb_add(m, ") ");
-       }
-
-       return 0;
-}
-
-static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
-{
-       if (!sk || sk->sk_state == TCP_TIME_WAIT)
-               return;
-
-       read_lock_bh(&sk->sk_callback_lock);
-       if (sk->sk_socket && sk->sk_socket->file) {
-               const struct cred *cred = sk->sk_socket->file->f_cred;
-               sb_add(m, "UID=%u GID=%u ",
-                       from_kuid_munged(&init_user_ns, cred->fsuid),
-                       from_kgid_munged(&init_user_ns, cred->fsgid));
-       }
-       read_unlock_bh(&sk->sk_callback_lock);
-}
-
-/* One level of recursion won't kill us */
-static void dump_ipv4_packet(struct sbuff *m,
-                       const struct nf_loginfo *info,
-                       const struct sk_buff *skb,
-                       unsigned int iphoff)
-{
-       struct iphdr _iph;
-       const struct iphdr *ih;
-       unsigned int logflags;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-       else
-               logflags = NF_LOG_MASK;
-
-       ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
-       if (ih == NULL) {
-               sb_add(m, "TRUNCATED");
-               return;
-       }
-
-       /* Important fields:
-        * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
-       /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
-       sb_add(m, "SRC=%pI4 DST=%pI4 ",
-              &ih->saddr, &ih->daddr);
-
-       /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
-       sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
-              ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
-              ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
-
-       /* Max length: 6 "CE DF MF " */
-       if (ntohs(ih->frag_off) & IP_CE)
-               sb_add(m, "CE ");
-       if (ntohs(ih->frag_off) & IP_DF)
-               sb_add(m, "DF ");
-       if (ntohs(ih->frag_off) & IP_MF)
-               sb_add(m, "MF ");
-
-       /* Max length: 11 "FRAG:65535 " */
-       if (ntohs(ih->frag_off) & IP_OFFSET)
-               sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
-
-       if ((logflags & XT_LOG_IPOPT) &&
-           ih->ihl * 4 > sizeof(struct iphdr)) {
-               const unsigned char *op;
-               unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
-               unsigned int i, optsize;
-
-               optsize = ih->ihl * 4 - sizeof(struct iphdr);
-               op = skb_header_pointer(skb, iphoff+sizeof(_iph),
-                                       optsize, _opt);
-               if (op == NULL) {
-                       sb_add(m, "TRUNCATED");
-                       return;
-               }
-
-               /* Max length: 127 "OPT (" 15*4*2chars ") " */
-               sb_add(m, "OPT (");
-               for (i = 0; i < optsize; i++)
-                       sb_add(m, "%02X", op[i]);
-               sb_add(m, ") ");
-       }
-
-       switch (ih->protocol) {
-       case IPPROTO_TCP:
-               if (dump_tcp_header(m, skb, ih->protocol,
-                                   ntohs(ih->frag_off) & IP_OFFSET,
-                                   iphoff+ih->ihl*4, logflags))
-                       return;
-               break;
-       case IPPROTO_UDP:
-       case IPPROTO_UDPLITE:
-               if (dump_udp_header(m, skb, ih->protocol,
-                                   ntohs(ih->frag_off) & IP_OFFSET,
-                                   iphoff+ih->ihl*4))
-                       return;
-               break;
-       case IPPROTO_ICMP: {
-               struct icmphdr _icmph;
-               const struct icmphdr *ich;
-               static const size_t required_len[NR_ICMP_TYPES+1]
-                       = { [ICMP_ECHOREPLY] = 4,
-                           [ICMP_DEST_UNREACH]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_SOURCE_QUENCH]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_REDIRECT]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_ECHO] = 4,
-                           [ICMP_TIME_EXCEEDED]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_PARAMETERPROB]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_TIMESTAMP] = 20,
-                           [ICMP_TIMESTAMPREPLY] = 20,
-                           [ICMP_ADDRESS] = 12,
-                           [ICMP_ADDRESSREPLY] = 12 };
-
-               /* Max length: 11 "PROTO=ICMP " */
-               sb_add(m, "PROTO=ICMP ");
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
-                                        sizeof(_icmph), &_icmph);
-               if (ich == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Max length: 18 "TYPE=255 CODE=255 " */
-               sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               if (ich->type <= NR_ICMP_TYPES &&
-                   required_len[ich->type] &&
-                   skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               switch (ich->type) {
-               case ICMP_ECHOREPLY:
-               case ICMP_ECHO:
-                       /* Max length: 19 "ID=65535 SEQ=65535 " */
-                       sb_add(m, "ID=%u SEQ=%u ",
-                              ntohs(ich->un.echo.id),
-                              ntohs(ich->un.echo.sequence));
-                       break;
-
-               case ICMP_PARAMETERPROB:
-                       /* Max length: 14 "PARAMETER=255 " */
-                       sb_add(m, "PARAMETER=%u ",
-                              ntohl(ich->un.gateway) >> 24);
-                       break;
-               case ICMP_REDIRECT:
-                       /* Max length: 24 "GATEWAY=255.255.255.255 " */
-                       sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
-                       /* Fall through */
-               case ICMP_DEST_UNREACH:
-               case ICMP_SOURCE_QUENCH:
-               case ICMP_TIME_EXCEEDED:
-                       /* Max length: 3+maxlen */
-                       if (!iphoff) { /* Only recurse once. */
-                               sb_add(m, "[");
-                               dump_ipv4_packet(m, info, skb,
-                                           iphoff + ih->ihl*4+sizeof(_icmph));
-                               sb_add(m, "] ");
-                       }
-
-                       /* Max length: 10 "MTU=65535 " */
-                       if (ich->type == ICMP_DEST_UNREACH &&
-                           ich->code == ICMP_FRAG_NEEDED)
-                               sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
-               }
-               break;
-       }
-       /* Max Length */
-       case IPPROTO_AH: {
-               struct ip_auth_hdr _ahdr;
-               const struct ip_auth_hdr *ah;
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 9 "PROTO=AH " */
-               sb_add(m, "PROTO=AH ");
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
-                                       sizeof(_ahdr), &_ahdr);
-               if (ah == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Length: 15 "SPI=0xF1234567 " */
-               sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
-               break;
-       }
-       case IPPROTO_ESP: {
-               struct ip_esp_hdr _esph;
-               const struct ip_esp_hdr *eh;
-
-               /* Max length: 10 "PROTO=ESP " */
-               sb_add(m, "PROTO=ESP ");
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
-                                       sizeof(_esph), &_esph);
-               if (eh == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Length: 15 "SPI=0xF1234567 " */
-               sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
-               break;
-       }
-       /* Max length: 10 "PROTO 255 " */
-       default:
-               sb_add(m, "PROTO=%u ", ih->protocol);
-       }
-
-       /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && !iphoff)
-               dump_sk_uid_gid(m, skb->sk);
-
-       /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (!iphoff && skb->mark)
-               sb_add(m, "MARK=0x%x ", skb->mark);
-
-       /* Proto    Max log string length */
-       /* IP:      40+46+6+11+127 = 230 */
-       /* TCP:     10+max(25,20+30+13+9+32+11+127) = 252 */
-       /* UDP:     10+max(25,20) = 35 */
-       /* UDPLITE: 14+max(25,20) = 39 */
-       /* ICMP:    11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
-       /* ESP:     10+max(25)+15 = 50 */
-       /* AH:      9+max(25)+15 = 49 */
-       /* unknown: 10 */
-
-       /* (ICMP allows recursion one level deep) */
-       /* maxlen =  IP + ICMP +  IP + max(TCP,UDP,ICMP,unknown) */
-       /* maxlen = 230+   91  + 230 + 252 = 803 */
-}
-
-static void dump_ipv4_mac_header(struct sbuff *m,
-                           const struct nf_loginfo *info,
-                           const struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       unsigned int logflags = 0;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-
-       if (!(logflags & XT_LOG_MACDECODE))
-               goto fallback;
-
-       switch (dev->type) {
-       case ARPHRD_ETHER:
-               sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
-                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
-                      ntohs(eth_hdr(skb)->h_proto));
-               return;
-       default:
-               break;
-       }
-
-fallback:
-       sb_add(m, "MAC=");
-       if (dev->hard_header_len &&
-           skb->mac_header != skb->network_header) {
-               const unsigned char *p = skb_mac_header(skb);
-               unsigned int i;
-
-               sb_add(m, "%02x", *p++);
-               for (i = 1; i < dev->hard_header_len; i++, p++)
-                       sb_add(m, ":%02x", *p);
-       }
-       sb_add(m, " ");
-}
-
-static void
-log_packet_common(struct sbuff *m,
-                 u_int8_t pf,
-                 unsigned int hooknum,
-                 const struct sk_buff *skb,
-                 const struct net_device *in,
-                 const struct net_device *out,
-                 const struct nf_loginfo *loginfo,
-                 const char *prefix)
-{
-       sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
-              '0' + loginfo->u.log.level, prefix,
-              in ? in->name : "",
-              out ? out->name : "");
-#ifdef CONFIG_BRIDGE_NETFILTER
-       if (skb->nf_bridge) {
-               const struct net_device *physindev;
-               const struct net_device *physoutdev;
-
-               physindev = skb->nf_bridge->physindev;
-               if (physindev && in != physindev)
-                       sb_add(m, "PHYSIN=%s ", physindev->name);
-               physoutdev = skb->nf_bridge->physoutdev;
-               if (physoutdev && out != physoutdev)
-                       sb_add(m, "PHYSOUT=%s ", physoutdev->name);
-       }
-#endif
-}
-
-
-static void
-ipt_log_packet(struct net *net,
-              u_int8_t pf,
-              unsigned int hooknum,
-              const struct sk_buff *skb,
-              const struct net_device *in,
-              const struct net_device *out,
-              const struct nf_loginfo *loginfo,
-              const char *prefix)
-{
-       struct sbuff *m;
-
-       /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
-               return;
-
-       m = sb_open();
-
-       if (!loginfo)
-               loginfo = &default_loginfo;
-
-       log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
-
-       if (in != NULL)
-               dump_ipv4_mac_header(m, loginfo, skb);
-
-       dump_ipv4_packet(m, loginfo, skb, 0);
-
-       sb_close(m);
-}
-
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-/* One level of recursion won't kill us */
-static void dump_ipv6_packet(struct sbuff *m,
-                       const struct nf_loginfo *info,
-                       const struct sk_buff *skb, unsigned int ip6hoff,
-                       int recurse)
-{
-       u_int8_t currenthdr;
-       int fragment;
-       struct ipv6hdr _ip6h;
-       const struct ipv6hdr *ih;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int logflags;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-       else
-               logflags = NF_LOG_MASK;
-
-       ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
-       if (ih == NULL) {
-               sb_add(m, "TRUNCATED");
-               return;
-       }
-
-       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
-       sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
-
-       /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
-       sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
-              ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
-              (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
-              ih->hop_limit,
-              (ntohl(*(__be32 *)ih) & 0x000fffff));
-
-       fragment = 0;
-       ptr = ip6hoff + sizeof(struct ipv6hdr);
-       currenthdr = ih->nexthdr;
-       while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
-               struct ipv6_opt_hdr _hdr;
-               const struct ipv6_opt_hdr *hp;
-
-               hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
-               if (hp == NULL) {
-                       sb_add(m, "TRUNCATED");
-                       return;
-               }
-
-               /* Max length: 48 "OPT (...) " */
-               if (logflags & XT_LOG_IPOPT)
-                       sb_add(m, "OPT ( ");
-
-               switch (currenthdr) {
-               case IPPROTO_FRAGMENT: {
-                       struct frag_hdr _fhdr;
-                       const struct frag_hdr *fh;
-
-                       sb_add(m, "FRAG:");
-                       fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
-                                               &_fhdr);
-                       if (fh == NULL) {
-                               sb_add(m, "TRUNCATED ");
-                               return;
-                       }
-
-                       /* Max length: 6 "65535 " */
-                       sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
-
-                       /* Max length: 11 "INCOMPLETE " */
-                       if (fh->frag_off & htons(0x0001))
-                               sb_add(m, "INCOMPLETE ");
-
-                       sb_add(m, "ID:%08x ", ntohl(fh->identification));
-
-                       if (ntohs(fh->frag_off) & 0xFFF8)
-                               fragment = 1;
-
-                       hdrlen = 8;
-
-                       break;
-               }
-               case IPPROTO_DSTOPTS:
-               case IPPROTO_ROUTING:
-               case IPPROTO_HOPOPTS:
-                       if (fragment) {
-                               if (logflags & XT_LOG_IPOPT)
-                                       sb_add(m, ")");
-                               return;
-                       }
-                       hdrlen = ipv6_optlen(hp);
-                       break;
-               /* Max Length */
-               case IPPROTO_AH:
-                       if (logflags & XT_LOG_IPOPT) {
-                               struct ip_auth_hdr _ahdr;
-                               const struct ip_auth_hdr *ah;
-
-                               /* Max length: 3 "AH " */
-                               sb_add(m, "AH ");
-
-                               if (fragment) {
-                                       sb_add(m, ")");
-                                       return;
-                               }
-
-                               ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
-                                                       &_ahdr);
-                               if (ah == NULL) {
-                                       /*
-                                        * Max length: 26 "INCOMPLETE [65535
-                                        *  bytes] )"
-                                        */
-                                       sb_add(m, "INCOMPLETE [%u bytes] )",
-                                              skb->len - ptr);
-                                       return;
-                               }
-
-                               /* Length: 15 "SPI=0xF1234567 */
-                               sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
-
-                       }
-
-                       hdrlen = (hp->hdrlen+2)<<2;
-                       break;
-               case IPPROTO_ESP:
-                       if (logflags & XT_LOG_IPOPT) {
-                               struct ip_esp_hdr _esph;
-                               const struct ip_esp_hdr *eh;
-
-                               /* Max length: 4 "ESP " */
-                               sb_add(m, "ESP ");
-
-                               if (fragment) {
-                                       sb_add(m, ")");
-                                       return;
-                               }
-
-                               /*
-                                * Max length: 26 "INCOMPLETE [65535 bytes] )"
-                                */
-                               eh = skb_header_pointer(skb, ptr, sizeof(_esph),
-                                                       &_esph);
-                               if (eh == NULL) {
-                                       sb_add(m, "INCOMPLETE [%u bytes] )",
-                                              skb->len - ptr);
-                                       return;
-                               }
-
-                               /* Length: 16 "SPI=0xF1234567 )" */
-                               sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
-
-                       }
-                       return;
-               default:
-                       /* Max length: 20 "Unknown Ext Hdr 255" */
-                       sb_add(m, "Unknown Ext Hdr %u", currenthdr);
-                       return;
-               }
-               if (logflags & XT_LOG_IPOPT)
-                       sb_add(m, ") ");
-
-               currenthdr = hp->nexthdr;
-               ptr += hdrlen;
-       }
-
-       switch (currenthdr) {
-       case IPPROTO_TCP:
-               if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
-                   logflags))
-                       return;
-               break;
-       case IPPROTO_UDP:
-       case IPPROTO_UDPLITE:
-               if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
-                       return;
-               break;
-       case IPPROTO_ICMPV6: {
-               struct icmp6hdr _icmp6h;
-               const struct icmp6hdr *ic;
-
-               /* Max length: 13 "PROTO=ICMPv6 " */
-               sb_add(m, "PROTO=ICMPv6 ");
-
-               if (fragment)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
-               if (ic == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
-                       return;
-               }
-
-               /* Max length: 18 "TYPE=255 CODE=255 " */
-               sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
-
-               switch (ic->icmp6_type) {
-               case ICMPV6_ECHO_REQUEST:
-               case ICMPV6_ECHO_REPLY:
-                       /* Max length: 19 "ID=65535 SEQ=65535 " */
-                       sb_add(m, "ID=%u SEQ=%u ",
-                               ntohs(ic->icmp6_identifier),
-                               ntohs(ic->icmp6_sequence));
-                       break;
-               case ICMPV6_MGM_QUERY:
-               case ICMPV6_MGM_REPORT:
-               case ICMPV6_MGM_REDUCTION:
-                       break;
-
-               case ICMPV6_PARAMPROB:
-                       /* Max length: 17 "POINTER=ffffffff " */
-                       sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
-                       /* Fall through */
-               case ICMPV6_DEST_UNREACH:
-               case ICMPV6_PKT_TOOBIG:
-               case ICMPV6_TIME_EXCEED:
-                       /* Max length: 3+maxlen */
-                       if (recurse) {
-                               sb_add(m, "[");
-                               dump_ipv6_packet(m, info, skb,
-                                           ptr + sizeof(_icmp6h), 0);
-                               sb_add(m, "] ");
-                       }
-
-                       /* Max length: 10 "MTU=65535 " */
-                       if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
-                               sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
-               }
-               break;
-       }
-       /* Max length: 10 "PROTO=255 " */
-       default:
-               sb_add(m, "PROTO=%u ", currenthdr);
-       }
-
-       /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && recurse)
-               dump_sk_uid_gid(m, skb->sk);
-
-       /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (recurse && skb->mark)
-               sb_add(m, "MARK=0x%x ", skb->mark);
-}
-
-static void dump_ipv6_mac_header(struct sbuff *m,
-                           const struct nf_loginfo *info,
-                           const struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       unsigned int logflags = 0;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-
-       if (!(logflags & XT_LOG_MACDECODE))
-               goto fallback;
-
-       switch (dev->type) {
-       case ARPHRD_ETHER:
-               sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
-                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
-                      ntohs(eth_hdr(skb)->h_proto));
-               return;
-       default:
-               break;
-       }
-
-fallback:
-       sb_add(m, "MAC=");
-       if (dev->hard_header_len &&
-           skb->mac_header != skb->network_header) {
-               const unsigned char *p = skb_mac_header(skb);
-               unsigned int len = dev->hard_header_len;
-               unsigned int i;
-
-               if (dev->type == ARPHRD_SIT) {
-                       p -= ETH_HLEN;
-
-                       if (p < skb->head)
-                               p = NULL;
-               }
-
-               if (p != NULL) {
-                       sb_add(m, "%02x", *p++);
-                       for (i = 1; i < len; i++)
-                               sb_add(m, ":%02x", *p++);
-               }
-               sb_add(m, " ");
-
-               if (dev->type == ARPHRD_SIT) {
-                       const struct iphdr *iph =
-                               (struct iphdr *)skb_mac_header(skb);
-                       sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
-                              &iph->daddr);
-               }
-       } else
-               sb_add(m, " ");
-}
-
-static void
-ip6t_log_packet(struct net *net,
-               u_int8_t pf,
-               unsigned int hooknum,
-               const struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               const struct nf_loginfo *loginfo,
-               const char *prefix)
-{
-       struct sbuff *m;
-
-       /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
-               return;
-
-       m = sb_open();
-
-       if (!loginfo)
-               loginfo = &default_loginfo;
-
-       log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
-
-       if (in != NULL)
-               dump_ipv6_mac_header(m, loginfo, skb);
-
-       dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
-
-       sb_close(m);
-}
-#endif
 
 static unsigned int
 log_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -839,17 +39,8 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
-       if (par->family == NFPROTO_IPV4)
-               ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
-                              par->out, &li, loginfo->prefix);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       else if (par->family == NFPROTO_IPV6)
-               ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
-                               par->out, &li, loginfo->prefix);
-#endif
-       else
-               WARN_ON_ONCE(1);
-
+       nf_log_packet(net, par->family, par->hooknum, skb, par->in, par->out,
+                     &li, "%s", loginfo->prefix);
        return XT_CONTINUE;
 }
 
@@ -870,7 +61,12 @@ static int log_tg_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        }
 
-       return 0;
+       return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+}
+
+static void log_tg_destroy(const struct xt_tgdtor_param *par)
+{
+       nf_logger_put(par->family, NF_LOG_TYPE_LOG);
 }
 
 static struct xt_target log_tg_regs[] __read_mostly = {
@@ -880,6 +76,7 @@ static struct xt_target log_tg_regs[] __read_mostly = {
                .target         = log_tg,
                .targetsize     = sizeof(struct xt_log_info),
                .checkentry     = log_tg_check,
+               .destroy        = log_tg_destroy,
                .me             = THIS_MODULE,
        },
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -889,78 +86,19 @@ static struct xt_target log_tg_regs[] __read_mostly = {
                .target         = log_tg,
                .targetsize     = sizeof(struct xt_log_info),
                .checkentry     = log_tg_check,
+               .destroy        = log_tg_destroy,
                .me             = THIS_MODULE,
        },
 #endif
 };
 
-static struct nf_logger ipt_log_logger __read_mostly = {
-       .name           = "ipt_LOG",
-       .logfn          = &ipt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static struct nf_logger ip6t_log_logger __read_mostly = {
-       .name           = "ip6t_LOG",
-       .logfn          = &ip6t_log_packet,
-       .me             = THIS_MODULE,
-};
-#endif
-
-static int __net_init log_net_init(struct net *net)
-{
-       nf_log_set(net, NFPROTO_IPV4, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_set(net, NFPROTO_IPV6, &ip6t_log_logger);
-#endif
-       return 0;
-}
-
-static void __net_exit log_net_exit(struct net *net)
-{
-       nf_log_unset(net, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_unset(net, &ip6t_log_logger);
-#endif
-}
-
-static struct pernet_operations log_net_ops = {
-       .init = log_net_init,
-       .exit = log_net_exit,
-};
-
 static int __init log_tg_init(void)
 {
-       int ret;
-
-       ret = register_pernet_subsys(&log_net_ops);
-       if (ret < 0)
-               goto err_pernet;
-
-       ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
-       if (ret < 0)
-               goto err_target;
-
-       nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
-#endif
-       return 0;
-
-err_target:
-       unregister_pernet_subsys(&log_net_ops);
-err_pernet:
-       return ret;
+       return xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
 }
 
 static void __exit log_tg_exit(void)
 {
-       unregister_pernet_subsys(&log_net_ops);
-       nf_log_unregister(&ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_unregister(&ip6t_log_logger);
-#endif
        xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
 }
 
index 3045a964f39c82152574ef4479e01a0254c88f5a..fe9415e5f91d34a9cb5d99d63dc95050da2a93c3 100644 (file)
@@ -170,7 +170,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
 #endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
-                       break;
                }
 
                entry->def.addrsel = addrmap;
index e6fac7e3db52e5fcb40629a60472ff2c7aa72dcb..1b38f7fe12f1f8e09006752333ca7ef5026294c7 100644 (file)
@@ -170,7 +170,6 @@ EXPORT_SYMBOL_GPL(netlink_remove_tap);
 static bool netlink_filter_tap(const struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
-       bool pass = false;
 
        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
@@ -184,11 +183,10 @@ static bool netlink_filter_tap(const struct sk_buff *skb)
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
-               pass = true;
-               break;
+               return true;
        }
 
-       return pass;
+       return false;
 }
 
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
@@ -1961,25 +1959,25 @@ struct netlink_broadcast_data {
        void *tx_data;
 };
 
-static int do_one_broadcast(struct sock *sk,
-                                  struct netlink_broadcast_data *p)
+static void do_one_broadcast(struct sock *sk,
+                                   struct netlink_broadcast_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;
 
        if (p->exclude_sk == sk)
-               goto out;
+               return;
 
        if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
-               goto out;
+               return;
 
        if (!net_eq(sock_net(sk), p->net))
-               goto out;
+               return;
 
        if (p->failure) {
                netlink_overrun(sk);
-               goto out;
+               return;
        }
 
        sock_hold(sk);
@@ -2017,9 +2015,6 @@ static int do_one_broadcast(struct sock *sk,
                p->skb2 = NULL;
        }
        sock_put(sk);
-
-out:
-       return 0;
 }
 
 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
index ede50d197e10dfaf3ec73670f40a82ae94c51111..71cf1bffea060a23ca2452e34a02109bd9186a6b 100644 (file)
@@ -1418,7 +1418,7 @@ static int __init nr_proto_init(void)
                struct net_device *dev;
 
                sprintf(name, "nr%d", i);
-               dev = alloc_netdev(0, name, nr_setup);
+               dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
                        printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
                        goto fail;
index 171cb9949ab58560bc3d1c997d963e2928cf5aa5..37deb173c956d350cdd3ed395a233525fecb5f55 100644 (file)
@@ -457,12 +457,10 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
                pr_err("Received a ACK/NACK PDU\n");
                rc = -EINVAL;
                goto exit;
-               break;
        case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
                pr_err("Received a SUPERVISOR PDU\n");
                rc = -EINVAL;
                goto exit;
-               break;
        }
 
        skb_pull(resp, size);
index 9db4bf6740d1e1dc08c60200f1fe8f82f4a370d5..20f59b62721a69d64ddfd1a4a269f2ee0f003905 100644 (file)
@@ -66,16 +66,16 @@ static struct genl_family dp_packet_genl_family;
 static struct genl_family dp_flow_genl_family;
 static struct genl_family dp_datapath_genl_family;
 
-static struct genl_multicast_group ovs_dp_flow_multicast_group = {
-       .name = OVS_FLOW_MCGROUP
+static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
+       .name = OVS_FLOW_MCGROUP,
 };
 
-static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
-       .name = OVS_DATAPATH_MCGROUP
+static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+       .name = OVS_DATAPATH_MCGROUP,
 };
 
-struct genl_multicast_group ovs_dp_vport_multicast_group = {
-       .name = OVS_VPORT_MCGROUP
+static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
+       .name = OVS_VPORT_MCGROUP,
 };
 
 /* Check if need to build a reply message.
@@ -1189,7 +1189,7 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
 };
 
-static struct genl_ops dp_flow_genl_ops[] = {
+static const struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
@@ -1577,7 +1577,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
 };
 
-static struct genl_ops dp_datapath_genl_ops[] = {
+static const struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
@@ -1944,7 +1944,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
 };
 
-static struct genl_ops dp_vport_genl_ops[] = {
+static const struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
@@ -2053,10 +2053,14 @@ static int __init dp_init(void)
 
        pr_info("Open vSwitch switching datapath\n");
 
-       err = ovs_flow_init();
+       err = ovs_internal_dev_rtnl_link_register();
        if (err)
                goto error;
 
+       err = ovs_flow_init();
+       if (err)
+               goto error_unreg_rtnl_link;
+
        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;
@@ -2083,6 +2087,8 @@ error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
        ovs_flow_exit();
+error_unreg_rtnl_link:
+       ovs_internal_dev_rtnl_link_unregister();
 error:
        return err;
 }
@@ -2095,6 +2101,7 @@ static void dp_cleanup(void)
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
+       ovs_internal_dev_rtnl_link_unregister();
 }
 
 module_init(dp_init);
index 789af9280e77264b4d7f65ddb6c333e96fa4147f..bd658555afdfcd2a05a22d61c3a46b524c7d3ebd 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
 #include "vport-internal_dev.h"
@@ -121,6 +122,10 @@ static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_get_stats64 = internal_dev_get_stats,
 };
 
+static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
+       .kind = "openvswitch",
+};
+
 static void do_setup(struct net_device *netdev)
 {
        ether_setup(netdev);
@@ -131,6 +136,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netdev->destructor = internal_dev_destructor;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
+       netdev->rtnl_link_ops = &internal_dev_link_ops;
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
@@ -159,7 +165,8 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        netdev_vport = netdev_vport_priv(vport);
 
        netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-                                        parms->name, do_setup);
+                                        parms->name, NET_NAME_UNKNOWN,
+                                        do_setup);
        if (!netdev_vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
@@ -248,3 +255,13 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
 
        return internal_dev_priv(netdev)->vport;
 }
+
+int ovs_internal_dev_rtnl_link_register(void)
+{
+       return rtnl_link_register(&internal_dev_link_ops);
+}
+
+void ovs_internal_dev_rtnl_link_unregister(void)
+{
+       rtnl_link_unregister(&internal_dev_link_ops);
+}
index 9a7d30ecc6a25277b0bb0d6bc7e42e84b4ef292d..1b179a190cff14eb274e6b0286021d90818c3803 100644 (file)
@@ -24,5 +24,7 @@
 
 int ovs_is_internal_dev(const struct net_device *);
 struct vport *ovs_internal_dev_get_vport(struct net_device *);
+int ovs_internal_dev_rtnl_link_register(void);
+void ovs_internal_dev_rtnl_link_unregister(void);
 
 #endif /* vport-internal_dev.h */
index 0edbd95c60e73abfba45a145978ac1d829bb1321..d8b7e247bebff5c5f4c3b83345ca5e4f492be60b 100644 (file)
@@ -143,8 +143,6 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        struct rtable *rt;
        struct flowi4 fl;
        __be16 src_port;
-       int port_min;
-       int port_max;
        __be16 df;
        int err;
 
@@ -172,8 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->ignore_df = 1;
 
-       inet_get_local_port_range(net, &port_min, &port_max);
-       src_port = vxlan_src_port(port_min, port_max, skb);
+       src_port = udp_flow_src_port(net, skb, 0, 0, true);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
                             fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
index b85c67ccb797197abf51596ac5f3044131aa97d8..614ca91f785aa4c5006a086cbfd3ee2e25462c7a 100644 (file)
@@ -3071,10 +3071,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
                break;
        case PACKET_MR_PROMISC:
                return dev_set_promiscuity(dev, what);
-               break;
        case PACKET_MR_ALLMULTI:
                return dev_set_allmulti(dev, what);
-               break;
        case PACKET_MR_UNICAST:
                if (i->alen != dev->addr_len)
                        return -EINVAL;
index 66dc65e7c6a1079d09f63c6caa3de115cc22bc54..e9a83a637185aa227c6b0b1a2dc6ebc50aab7385 100644 (file)
@@ -267,7 +267,7 @@ int gprs_attach(struct sock *sk)
                return -EINVAL; /* need packet boundaries */
 
        /* Create net device */
-       dev = alloc_netdev(sizeof(*gp), ifname, gprs_setup);
+       dev = alloc_netdev(sizeof(*gp), ifname, NET_NAME_UNKNOWN, gprs_setup);
        if (!dev)
                return -ENOMEM;
        gp = netdev_priv(dev);
index 8451c8cdc9deb534ed335a065c64a47e570170cd..a85c1a086ae44f71b48be0112e08ae5164ac2c9e 100644 (file)
@@ -1538,7 +1538,7 @@ static int __init rose_proto_init(void)
                char name[IFNAMSIZ];
 
                sprintf(name, "rose%d", i);
-               dev = alloc_netdev(0, name, rose_setup);
+               dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
                if (!dev) {
                        printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
                        rc = -ENOMEM;
index 0ad080790a32a341a1ddc57d632302563964a247..eec998efd02060630d7f550c77741aa65283434c 100644 (file)
@@ -346,7 +346,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
 
        n_elem = ntohl(*xdr++);
        toklen -= 4;
-       if (n_elem < 0 || n_elem > max_n_elem)
+       if (n_elem > max_n_elem)
                return -EINVAL;
        *_n_elem = n_elem;
        if (n_elem > 0) {
index 4f912c0e225b674dfa117060ee547b99feb972ca..eb48306033d91ef131fd67fcd432f73ed55a43cb 100644 (file)
@@ -218,10 +218,12 @@ static int mirred_device_event(struct notifier_block *unused,
 
        if (event == NETDEV_UNREGISTER)
                list_for_each_entry(m, &mirred_list, tcfm_list) {
+                       spin_lock_bh(&m->tcf_lock);
                        if (m->tcfm_dev == dev) {
                                dev_put(dev);
                                m->tcfm_dev = NULL;
                        }
+                       spin_unlock_bh(&m->tcf_lock);
                }
 
        return NOTIFY_DONE;
index 45527e6b52dbf396cbb7415bb0613152a8320096..c28b0d327b124b8b478680293f145214caa6f835 100644 (file)
@@ -561,13 +561,14 @@ EXPORT_SYMBOL(tcf_exts_change);
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
+       struct nlattr *nest;
+
        if (exts->action && !list_empty(&exts->actions)) {
                /*
                 * again for backward compatible mode - we want
                 * to work with both old and new modes of entering
                 * tc data even if iproute2  was newer - jhs
                 */
-               struct nlattr *nest;
                if (exts->type != TCA_OLD_COMPAT) {
                        nest = nla_nest_start(skb, exts->action);
                        if (nest == NULL)
@@ -585,10 +586,14 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
                        nla_nest_end(skb, nest);
                }
        }
-#endif
        return 0;
-nla_put_failure: __attribute__ ((unused))
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
        return -1;
+#else
+       return 0;
+#endif
 }
 EXPORT_SYMBOL(tcf_exts_dump);
 
index bfd34e4c1afc50f6569ecc57eda942ff25e2caea..7c292d474f47861f3d9e293a1e7c55da7f38c5ca 100644 (file)
@@ -125,7 +125,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 {
        struct can_filter *conf = data; /* Array with rules */
        struct canid_match *cm;
-       struct canid_match *cm_old = (struct canid_match *)m->data;
        int i;
 
        if (!len)
@@ -181,12 +180,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 
        m->datalen = sizeof(struct canid_match) + len;
        m->data = (unsigned long)cm;
-
-       if (cm_old != NULL) {
-               pr_err("canid: Configuring an existing ematch!\n");
-               kfree(cm_old);
-       }
-
        return 0;
 }
 
index e1543b03e39d10c9b952a7ccf454cbd76dd09f09..fc04fe93c2da2fa8b43b4be97ccd28dba8ca9e3c 100644 (file)
@@ -108,7 +108,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 
 /*
  * Transmit one skb, and handle the return status as required. Holding the
- * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this
  * function.
  *
  * Returns to the caller:
@@ -156,7 +156,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * __QDISC___STATE_RUNNING guarantees only one CPU can process
  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
  * this queue.
  *
index 474167162947acb83220c539a490c6bd62dc9e8b..bd33793b527e176b07f7bacb77143ed38e32d31c 100644 (file)
@@ -485,8 +485,8 @@ static int __init teql_init(void)
                struct net_device *dev;
                struct teql_master *master;
 
-               dev = alloc_netdev(sizeof(struct teql_master),
-                                 "teql%d", teql_master_setup);
+               dev = alloc_netdev(sizeof(struct teql_master), "teql%d",
+                                  NET_NAME_UNKNOWN, teql_master_setup);
                if (!dev) {
                        err = -ENOMEM;
                        break;
index 5c30b7a873dfc3eeccf841e2486c7866d26024c9..3b4ffb021cf1728353b5311e519604759a03b617 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
          protocol.o endpointola.o associola.o \
          transport.o chunk.o sm_make_chunk.o ulpevent.o \
-         inqueue.o outqueue.o ulpqueue.o command.o \
+         inqueue.o outqueue.o ulpqueue.o \
          tsnmap.o bind_addr.o socket.o primitive.o \
          output.o input.o debug.o ssnmap.o auth.o
 
diff --git a/net/sctp/command.c b/net/sctp/command.c
deleted file mode 100644 (file)
index dd73758..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SCTP kernel implementation Copyright (C) 1999-2001
- * Cisco, Motorola, and IBM
- * Copyright 2001 La Monte H.P. Yarroll
- *
- * This file is part of the SCTP kernel implementation
- *
- * These functions manipulate sctp command sequences.
- *
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- *                 ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING.  If not, see
- * <http://www.gnu.org/licenses/>.
- *
- * Please send any bug reports or fixes you make to the
- * email address(es):
- *    lksctp developers <linux-sctp@vger.kernel.org>
- *
- * Written or modified by:
- *    La Monte H.P. Yarroll <piggy@acm.org>
- *    Karl Knutson <karl@athena.chicago.il.us>
- */
-
-#include <linux/types.h>
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-/* Initialize a block of memory as a command sequence. */
-int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
-{
-       memset(seq, 0, sizeof(sctp_cmd_seq_t));
-       return 1;               /* We always succeed.  */
-}
-
-/* Add a command to a sctp_cmd_seq_t.
- * Return 0 if the command sequence is full.
- */
-void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
-{
-       BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS);
-
-       seq->cmds[seq->next_free_slot].verb = verb;
-       seq->cmds[seq->next_free_slot++].obj = obj;
-}
-
-/* Return the next command structure in a sctp_cmd_seq.
- * Returns NULL at the end of the sequence.
- */
-sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
-{
-       sctp_cmd_t *retval = NULL;
-
-       if (seq->next_cmd < seq->next_free_slot)
-               retval = &seq->cmds[seq->next_cmd++];
-
-       return retval;
-}
-
index 9c77947c0597c1c2f646214584a8480e038105e9..d31435e559b23416371106f0a3b93948de420470 100644 (file)
@@ -1025,7 +1025,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
                                sctp_outq_head_data(q, chunk);
                                goto sctp_flush_out;
-                               break;
 
                        case SCTP_XMIT_OK:
                                /* The sender is in the SHUTDOWN-PENDING state,
index 5170a1ff95a1dba56ffafc30a9a4fdadf25ce1d4..d3f1ea460c500caf6e39c1a9b2f31f2de28f37a4 100644 (file)
@@ -4182,7 +4182,6 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
        case SCTP_CID_ACTION_DISCARD:
                /* Discard the packet.  */
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-               break;
        case SCTP_CID_ACTION_DISCARD_ERR:
                /* Generate an ERROR chunk as response. */
                hdr = unk_chunk->chunk_hdr;
@@ -4198,11 +4197,9 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
                /* Discard the packet.  */
                sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
-               break;
        case SCTP_CID_ACTION_SKIP:
                /* Skip the chunk.  */
                return SCTP_DISPOSITION_DISCARD;
-               break;
        case SCTP_CID_ACTION_SKIP_ERR:
                /* Generate an ERROR chunk as response. */
                hdr = unk_chunk->chunk_hdr;
@@ -4216,7 +4213,6 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
                }
                /* Skip the chunk.  */
                return SCTP_DISPOSITION_CONSUME;
-               break;
        default:
                break;
        }
index 429899689408caec64cf5c36b4eb0b00369f8e48..743308f40544c743e3cd78d8e77dab55156d57a5 100644 (file)
@@ -1602,12 +1602,13 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct sctp_initmsg *sinit;
        sctp_assoc_t associd = 0;
        sctp_cmsgs_t cmsgs = { NULL };
-       int err;
        sctp_scope_t scope;
-       long timeo;
-       __u16 sinfo_flags = 0;
+       bool fill_sinfo_ttl = false;
        struct sctp_datamsg *datamsg;
        int msg_flags = msg->msg_flags;
+       __u16 sinfo_flags = 0;
+       long timeo;
+       int err;
 
        err = 0;
        sp = sctp_sk(sk);
@@ -1648,10 +1649,21 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                msg_name = msg->msg_name;
        }
 
-       sinfo = cmsgs.info;
        sinit = cmsgs.init;
+       if (cmsgs.sinfo != NULL) {
+               memset(&default_sinfo, 0, sizeof(default_sinfo));
+               default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
+               default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
+               default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
+               default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
+               default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
 
-       /* Did the user specify SNDRCVINFO?  */
+               sinfo = &default_sinfo;
+               fill_sinfo_ttl = true;
+       } else {
+               sinfo = cmsgs.srinfo;
+       }
+       /* Did the user specify SNDINFO/SNDRCVINFO? */
        if (sinfo) {
                sinfo_flags = sinfo->sinfo_flags;
                associd = sinfo->sinfo_assoc_id;
@@ -1858,8 +1870,8 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        pr_debug("%s: we have a valid association\n", __func__);
 
        if (!sinfo) {
-               /* If the user didn't specify SNDRCVINFO, make up one with
-                * some defaults.
+               /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
+                * one with some defaults.
                 */
                memset(&default_sinfo, 0, sizeof(default_sinfo));
                default_sinfo.sinfo_stream = asoc->default_stream;
@@ -1868,7 +1880,13 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                default_sinfo.sinfo_context = asoc->default_context;
                default_sinfo.sinfo_timetolive = asoc->default_timetolive;
                default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
+
                sinfo = &default_sinfo;
+       } else if (fill_sinfo_ttl) {
+               /* In case SNDINFO was specified, we still need to fill
+                * it with a default ttl from the assoc here.
+                */
+               sinfo->sinfo_timetolive = asoc->default_timetolive;
        }
 
        /* API 7.1.7, the sndbuf size per association bounds the
@@ -2042,8 +2060,6 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
  *  flags   - flags sent or received with the user message, see Section
  *            5 for complete description of the flags.
  */
-static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-
 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                        struct msghdr *msg, size_t len, int noblock,
                        int flags, int *addr_len)
@@ -2094,9 +2110,16 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
        }
 
+       /* Check if we allow SCTP_NXTINFO. */
+       if (sp->recvnxtinfo)
+               sctp_ulpevent_read_nxtinfo(event, msg, sk);
+       /* Check if we allow SCTP_RCVINFO. */
+       if (sp->recvrcvinfo)
+               sctp_ulpevent_read_rcvinfo(event, msg);
        /* Check if we allow SCTP_SNDRCVINFO. */
        if (sp->subscribe.sctp_data_io_event)
                sctp_ulpevent_read_sndrcvinfo(event, msg);
+
 #if 0
        /* FIXME: we should be calling IP/IPv6 layers.  */
        if (sk->sk_protinfo.af_inet.cmsg_flags)
@@ -2182,8 +2205,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
        if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                return -EFAULT;
 
-       /*
-        * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+       if (sctp_sk(sk)->subscribe.sctp_data_io_event)
+               pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
+                                   "Requested SCTP_SNDRCVINFO event.\n"
+                                   "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
+                                   current->comm, task_pid_nr(current));
+
+       /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
         * if there is no data to be sent or retransmit, the stack will
         * immediately send up this notification.
         */
@@ -2747,19 +2775,22 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
                                              char __user *optval,
                                              unsigned int optlen)
 {
-       struct sctp_sndrcvinfo info;
-       struct sctp_association *asoc;
        struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndrcvinfo info;
 
-       if (optlen != sizeof(struct sctp_sndrcvinfo))
+       if (optlen != sizeof(info))
                return -EINVAL;
        if (copy_from_user(&info, optval, optlen))
                return -EFAULT;
+       if (info.sinfo_flags &
+           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+             SCTP_ABORT | SCTP_EOF))
+               return -EINVAL;
 
        asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
        if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
                return -EINVAL;
-
        if (asoc) {
                asoc->default_stream = info.sinfo_stream;
                asoc->default_flags = info.sinfo_flags;
@@ -2777,6 +2808,44 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
        return 0;
 }
 
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_setsockopt_default_sndinfo(struct sock *sk,
+                                          char __user *optval,
+                                          unsigned int optlen)
+{
+       struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndinfo info;
+
+       if (optlen != sizeof(info))
+               return -EINVAL;
+       if (copy_from_user(&info, optval, optlen))
+               return -EFAULT;
+       if (info.snd_flags &
+           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+             SCTP_ABORT | SCTP_EOF))
+               return -EINVAL;
+
+       asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+       if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+               return -EINVAL;
+       if (asoc) {
+               asoc->default_stream = info.snd_sid;
+               asoc->default_flags = info.snd_flags;
+               asoc->default_ppid = info.snd_ppid;
+               asoc->default_context = info.snd_context;
+       } else {
+               sp->default_stream = info.snd_sid;
+               sp->default_flags = info.snd_flags;
+               sp->default_ppid = info.snd_ppid;
+               sp->default_context = info.snd_context;
+       }
+
+       return 0;
+}
+
 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
  *
  * Requests that the local SCTP stack use the enclosed peer address as
@@ -3523,7 +3592,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
        return 0;
 }
 
-
 /*
  * SCTP_PEER_ADDR_THLDS
  *
@@ -3574,6 +3642,38 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
        return 0;
 }
 
+static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       int val;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *) optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
+
+       return 0;
+}
+
+static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       int val;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *) optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
+
+       return 0;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3671,6 +3771,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
                retval = sctp_setsockopt_default_send_param(sk, optval,
                                                            optlen);
                break;
+       case SCTP_DEFAULT_SNDINFO:
+               retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
+               break;
        case SCTP_PRIMARY_ADDR:
                retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
                break;
@@ -3725,6 +3828,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_PEER_ADDR_THLDS:
                retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
                break;
+       case SCTP_RECVRCVINFO:
+               retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
+               break;
+       case SCTP_RECVNXTINFO:
+               retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -3971,6 +4080,9 @@ static int sctp_init_sock(struct sock *sk)
        /* Enable Nagle algorithm by default.  */
        sp->nodelay           = 0;
 
+       sp->recvrcvinfo = 0;
+       sp->recvnxtinfo = 0;
+
        /* Enable by default. */
        sp->v4mapped          = 1;
 
@@ -4964,14 +5076,14 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
                                        int len, char __user *optval,
                                        int __user *optlen)
 {
-       struct sctp_sndrcvinfo info;
-       struct sctp_association *asoc;
        struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndrcvinfo info;
 
-       if (len < sizeof(struct sctp_sndrcvinfo))
+       if (len < sizeof(info))
                return -EINVAL;
 
-       len = sizeof(struct sctp_sndrcvinfo);
+       len = sizeof(info);
 
        if (copy_from_user(&info, optval, len))
                return -EFAULT;
@@ -4979,7 +5091,6 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
        asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
        if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
                return -EINVAL;
-
        if (asoc) {
                info.sinfo_stream = asoc->default_stream;
                info.sinfo_flags = asoc->default_flags;
@@ -5002,6 +5113,48 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
        return 0;
 }
 
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
+                                          char __user *optval,
+                                          int __user *optlen)
+{
+       struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndinfo info;
+
+       if (len < sizeof(info))
+               return -EINVAL;
+
+       len = sizeof(info);
+
+       if (copy_from_user(&info, optval, len))
+               return -EFAULT;
+
+       asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+       if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+               return -EINVAL;
+       if (asoc) {
+               info.snd_sid = asoc->default_stream;
+               info.snd_flags = asoc->default_flags;
+               info.snd_ppid = asoc->default_ppid;
+               info.snd_context = asoc->default_context;
+       } else {
+               info.snd_sid = sp->default_stream;
+               info.snd_flags = sp->default_flags;
+               info.snd_ppid = sp->default_ppid;
+               info.snd_context = sp->default_context;
+       }
+
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &info, len))
+               return -EFAULT;
+
+       return 0;
+}
+
 /*
  *
  * 7.1.5 SCTP_NODELAY
@@ -5752,6 +5905,46 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        return 0;
 }
 
+static int sctp_getsockopt_recvrcvinfo(struct sock *sk,        int len,
+                                      char __user *optval,
+                                      int __user *optlen)
+{
+       int val = 0;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+       if (sctp_sk(sk)->recvrcvinfo)
+               val = 1;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int sctp_getsockopt_recvnxtinfo(struct sock *sk,        int len,
+                                      char __user *optval,
+                                      int __user *optlen)
+{
+       int val = 0;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+       if (sctp_sk(sk)->recvnxtinfo)
+               val = 1;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
 {
@@ -5821,6 +6014,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
                retval = sctp_getsockopt_default_send_param(sk, len,
                                                            optval, optlen);
                break;
+       case SCTP_DEFAULT_SNDINFO:
+               retval = sctp_getsockopt_default_sndinfo(sk, len,
+                                                        optval, optlen);
+               break;
        case SCTP_PRIMARY_ADDR:
                retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
                break;
@@ -5895,6 +6092,12 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
        case SCTP_GET_ASSOC_STATS:
                retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
                break;
+       case SCTP_RECVRCVINFO:
+               retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
+               break;
+       case SCTP_RECVNXTINFO:
+               retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -6390,8 +6593,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
        struct cmsghdr *cmsg;
        struct msghdr *my_msg = (struct msghdr *)msg;
 
-       for (cmsg = CMSG_FIRSTHDR(msg);
-            cmsg != NULL;
+       for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
             cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
                if (!CMSG_OK(my_msg, cmsg))
                        return -EINVAL;
@@ -6404,7 +6606,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                switch (cmsg->cmsg_type) {
                case SCTP_INIT:
                        /* SCTP Socket API Extension
-                        * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
+                        * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
                         *
                         * This cmsghdr structure provides information for
                         * initializing new SCTP associations with sendmsg().
@@ -6416,15 +6618,15 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                         * ------------  ------------   ----------------------
                         * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
                         */
-                       if (cmsg->cmsg_len !=
-                           CMSG_LEN(sizeof(struct sctp_initmsg)))
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
                                return -EINVAL;
-                       cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg);
+
+                       cmsgs->init = CMSG_DATA(cmsg);
                        break;
 
                case SCTP_SNDRCV:
                        /* SCTP Socket API Extension
-                        * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
+                        * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
                         *
                         * This cmsghdr structure specifies SCTP options for
                         * sendmsg() and describes SCTP header information
@@ -6434,24 +6636,44 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                         * ------------  ------------   ----------------------
                         * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
                         */
-                       if (cmsg->cmsg_len !=
-                           CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
                                return -EINVAL;
 
-                       cmsgs->info =
-                               (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+                       cmsgs->srinfo = CMSG_DATA(cmsg);
 
-                       /* Minimally, validate the sinfo_flags. */
-                       if (cmsgs->info->sinfo_flags &
+                       if (cmsgs->srinfo->sinfo_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
 
+               case SCTP_SNDINFO:
+                       /* SCTP Socket API Extension
+                        * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+                        *
+                        * This cmsghdr structure specifies SCTP options for
+                        * sendmsg(). This structure and SCTP_RCVINFO replaces
+                        * SCTP_SNDRCV which has been deprecated.
+                        *
+                        * cmsg_level    cmsg_type      cmsg_data[]
+                        * ------------  ------------   ---------------------
+                        * IPPROTO_SCTP  SCTP_SNDINFO    struct sctp_sndinfo
+                        */
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
+                               return -EINVAL;
+
+                       cmsgs->sinfo = CMSG_DATA(cmsg);
+
+                       if (cmsgs->sinfo->snd_flags &
+                           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_ABORT | SCTP_EOF))
+                               return -EINVAL;
+                       break;
                default:
                        return -EINVAL;
                }
        }
+
        return 0;
 }
 
@@ -6518,8 +6740,8 @@ out:
  * Note: This is pretty much the same routine as in core/datagram.c
  * with a few changes to make lksctp work.
  */
-static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
-                                             int noblock, int *err)
+struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
+                                      int noblock, int *err)
 {
        int error;
        struct sk_buff *skb;
index 12c7e01c267711ef19878f6c1da1e66e257ecf7e..2e9ada10fd846c10bc6281c30f29cf1fed02ca3c 100644 (file)
@@ -424,8 +424,9 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos)
 {
-       pr_warn_once("Changing rto_alpha or rto_beta may lead to "
-                    "suboptimal rtt/srtt estimations!\n");
+       if (write)
+               pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+                            "suboptimal rtt/srtt estimations!\n");
 
        return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
 }
index 7dd672fa651f979ae6b93578fc678254a7317b6b..b10e047bbd15548b405393837047b437e4802d26 100644 (file)
@@ -594,15 +594,16 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 }
 
 /* What is the next timeout value for this transport? */
-unsigned long sctp_transport_timeout(struct sctp_transport *t)
+unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
-       unsigned long timeout;
-       timeout = t->rto + sctp_jitter(t->rto);
-       if ((t->state != SCTP_UNCONFIRMED) &&
-           (t->state != SCTP_PF))
-               timeout += t->hbinterval;
-       timeout += jiffies;
-       return timeout;
+       /* RTO + timer slack +/- 50% of RTO */
+       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+
+       if (trans->state != SCTP_UNCONFIRMED &&
+           trans->state != SCTP_PF)
+               timeout += trans->hbinterval;
+
+       return timeout + jiffies;
 }
 
 /* Reset transport variables to their initial values */
index b6842fdb53d4b09ffdafec78c2bab535e6eaad2d..e049298ecfa0f2a981ad5366161c4484ac67303c 100644 (file)
@@ -886,6 +886,69 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                 sizeof(sinfo), &sinfo);
 }
 
+/* RFC6458, Section 5.3.5 SCTP Receive Information Structure
+ * (SCTP_SNDRCV)
+ */
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *msghdr)
+{
+       struct sctp_rcvinfo rinfo;
+
+       if (sctp_ulpevent_is_notification(event))
+               return;
+
+       memset(&rinfo, 0, sizeof(struct sctp_rcvinfo));
+       rinfo.rcv_sid = event->stream;
+       rinfo.rcv_ssn = event->ssn;
+       rinfo.rcv_ppid = event->ppid;
+       rinfo.rcv_flags = event->flags;
+       rinfo.rcv_tsn = event->tsn;
+       rinfo.rcv_cumtsn = event->cumtsn;
+       rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc);
+       rinfo.rcv_context = event->asoc->default_rcv_context;
+
+       put_cmsg(msghdr, IPPROTO_SCTP, SCTP_RCVINFO,
+                sizeof(rinfo), &rinfo);
+}
+
+/* RFC6458, Section 5.3.6. SCTP Next Receive Information Structure
+ * (SCTP_NXTINFO)
+ */
+static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                                        struct msghdr *msghdr,
+                                        const struct sk_buff *skb)
+{
+       struct sctp_nxtinfo nxtinfo;
+
+       memset(&nxtinfo, 0, sizeof(nxtinfo));
+       nxtinfo.nxt_sid = event->stream;
+       nxtinfo.nxt_ppid = event->ppid;
+       nxtinfo.nxt_flags = event->flags;
+       if (sctp_ulpevent_is_notification(event))
+               nxtinfo.nxt_flags |= SCTP_NOTIFICATION;
+       nxtinfo.nxt_length = skb->len;
+       nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc);
+
+       put_cmsg(msghdr, IPPROTO_SCTP, SCTP_NXTINFO,
+                sizeof(nxtinfo), &nxtinfo);
+}
+
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *msghdr,
+                               struct sock *sk)
+{
+       struct sk_buff *skb;
+       int err;
+
+       skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err);
+       if (skb != NULL) {
+               __sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb),
+                                            msghdr, skb);
+               /* Just release refcount here. */
+               kfree_skb(skb);
+       }
+}
+
 /* Do accounting for bytes received and hold a reference to the association
  * for each skb.
  */
index 55c6c9d3e1ceee905bd25ce09d4c8bd7c41a3412..dd13bfa09333246fb6ba54171fd11ad0cc4fe63f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, Ericsson AB
+ * Copyright (c) 2004-2006, 2014, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
@@ -38,6 +38,8 @@
 #include "core.h"
 #include "link.h"
 #include "port.h"
+#include "socket.h"
+#include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
 
@@ -138,6 +140,11 @@ static void tipc_bclink_unlock(void)
                tipc_link_reset_all(node);
 }
 
+uint  tipc_bclink_get_mtu(void)
+{
+       return MAX_PKT_DEFAULT_MCAST;
+}
+
 void tipc_bclink_set_flags(unsigned int flags)
 {
        bclink->flags |= flags;
@@ -382,30 +389,50 @@ static void bclink_peek_nack(struct tipc_msg *msg)
        tipc_node_unlock(n_ptr);
 }
 
-/*
- * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
+/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
+ *                    and to identified node local sockets
+ * @buf: chain of buffers containing message
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
 int tipc_bclink_xmit(struct sk_buff *buf)
 {
-       int res;
+       int rc = 0;
+       int bc = 0;
+       struct sk_buff *clbuf;
 
-       tipc_bclink_lock();
-
-       if (!bclink->bcast_nodes.count) {
-               res = msg_data_sz(buf_msg(buf));
-               kfree_skb(buf);
-               goto exit;
+       /* Prepare clone of message for local node */
+       clbuf = tipc_msg_reassemble(buf);
+       if (unlikely(!clbuf)) {
+               kfree_skb_list(buf);
+               return -EHOSTUNREACH;
        }
 
-       res = __tipc_link_xmit(bcl, buf);
-       if (likely(res >= 0)) {
-               bclink_set_last_sent();
-               bcl->stats.queue_sz_counts++;
-               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+       /* Broadcast to all other nodes */
+       if (likely(bclink)) {
+               tipc_bclink_lock();
+               if (likely(bclink->bcast_nodes.count)) {
+                       rc = __tipc_link_xmit(bcl, buf);
+                       if (likely(!rc)) {
+                               bclink_set_last_sent();
+                               bcl->stats.queue_sz_counts++;
+                               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+                       }
+                       bc = 1;
+               }
+               tipc_bclink_unlock();
        }
-exit:
-       tipc_bclink_unlock();
-       return res;
+
+       if (unlikely(!bc))
+               kfree_skb_list(buf);
+
+       /* Deliver message clone */
+       if (likely(!rc))
+               tipc_sk_mcast_rcv(clbuf);
+       else
+               kfree_skb(clbuf);
+
+       return rc;
 }
 
 /**
@@ -443,7 +470,7 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
-       int deferred;
+       int deferred = 0;
 
        /* Screen out unwanted broadcast messages */
 
@@ -494,7 +521,7 @@ receive:
                        tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
-                               tipc_port_mcast_rcv(buf, NULL);
+                               tipc_sk_mcast_rcv(buf);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
@@ -573,8 +600,7 @@ receive:
                node->bclink.deferred_size += deferred;
                bclink_update_last_sent(node, seqno);
                buf = NULL;
-       } else
-               deferred = 0;
+       }
 
        tipc_bclink_lock();
 
@@ -611,6 +637,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
+       struct tipc_msg *msg = buf_msg(buf);
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
@@ -618,10 +645,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
         * since they are sent in an unreliable manner and don't need it
         */
        if (likely(!msg_non_seq(buf_msg(buf)))) {
-               struct tipc_msg *msg;
-
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
-               msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
@@ -638,12 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
                struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
-               struct tipc_bearer *b = p;
+               struct tipc_bearer *bp[2] = {p, s};
+               struct tipc_bearer *b = bp[msg_link_selector(msg)];
                struct sk_buff *tbuf;
 
                if (!p)
                        break; /* No more bearers to try */
-
+               if (!b)
+                       b = p;
                tipc_nmap_diff(&bcbearer->remains, &b->nodes,
                               &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
@@ -660,13 +686,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
                        tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
-
-               /* Swap bearers for next packet */
-               if (s) {
-                       bcbearer->bpairs[bp_index].primary = s;
-                       bcbearer->bpairs[bp_index].secondary = p;
-               }
-
                if (bcbearer->remains_new.count == 0)
                        break; /* All targets reached */
 
index 00330c45df3e04d03626a31d5f2ee6ba66569298..4875d9536aee7a98ea523b895b3be6475529d7bd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.h: Include file for TIPC broadcast code
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -89,7 +89,6 @@ void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int  tipc_bclink_xmit(struct sk_buff *buf);
 void tipc_bclink_rcv(struct sk_buff *buf);
 u32  tipc_bclink_get_last_sent(void);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
@@ -98,5 +97,7 @@ int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
 int  tipc_bclink_reset_stats(void);
 int  tipc_bclink_set_queue_limits(u32 limit);
 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
+uint  tipc_bclink_get_mtu(void);
+int tipc_bclink_xmit(struct sk_buff *buf);
 
 #endif
index ad2c57f5868dafe28fbf4204f9fc2189c1156d25..fb1485dc6736ec84719c262334ead93d659bb7c4 100644 (file)
@@ -82,15 +82,13 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
 static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
                                 struct sk_buff **buf);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
-static int  tipc_link_iovec_long_xmit(struct tipc_port *sender,
-                                     struct iovec const *msg_sect,
-                                     unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
 static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
 
 /*
  *  Simple link routines
@@ -335,13 +333,15 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 {
        struct tipc_port *p_ptr;
+       struct tipc_sock *tsk;
 
        spin_lock_bh(&tipc_port_list_lock);
        p_ptr = tipc_port_lock(origport);
        if (p_ptr) {
                if (!list_empty(&p_ptr->wait_list))
                        goto exit;
-               p_ptr->congested = 1;
+               tsk = tipc_port_to_sock(p_ptr);
+               tsk->link_cong = 1;
                p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
                list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
                l_ptr->stats.link_congs++;
@@ -355,6 +355,7 @@ exit:
 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
 {
        struct tipc_port *p_ptr;
+       struct tipc_sock *tsk;
        struct tipc_port *temp_p_ptr;
        int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 
@@ -370,10 +371,11 @@ void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
                                 wait_list) {
                if (win <= 0)
                        break;
+               tsk = tipc_port_to_sock(p_ptr);
                list_del_init(&p_ptr->wait_list);
                spin_lock_bh(p_ptr->lock);
-               p_ptr->congested = 0;
-               tipc_port_wakeup(p_ptr);
+               tsk->link_cong = 0;
+               tipc_sock_wakeup(tsk);
                win -= p_ptr->waiting_pkts;
                spin_unlock_bh(p_ptr->lock);
        }
@@ -676,178 +678,142 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        }
 }
 
-/*
- * link_bundle_buf(): Append contents of a buffer to
- * the tail of an existing one.
+/* tipc_link_cong: determine return value and how to treat the
+ * sent buffer during link congestion.
+ * - For plain, errorless user data messages we keep the buffer and
+ *   return -ELINKONG.
+ * - For all other messages we discard the buffer and return -EHOSTUNREACH
+ * - For TIPC internal messages we also reset the link
  */
-static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
-                          struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
 {
-       struct tipc_msg *bundler_msg = buf_msg(bundler);
        struct tipc_msg *msg = buf_msg(buf);
-       u32 size = msg_size(msg);
-       u32 bundle_size = msg_size(bundler_msg);
-       u32 to_pos = align(bundle_size);
-       u32 pad = to_pos - bundle_size;
-
-       if (msg_user(bundler_msg) != MSG_BUNDLER)
-               return 0;
-       if (msg_type(bundler_msg) != OPEN_MSG)
-               return 0;
-       if (skb_tailroom(bundler) < (pad + size))
-               return 0;
-       if (l_ptr->max_pkt < (to_pos + size))
-               return 0;
-
-       skb_put(bundler, pad + size);
-       skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
-       msg_set_size(bundler_msg, to_pos + size);
-       msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
-       kfree_skb(buf);
-       l_ptr->stats.sent_bundled++;
-       return 1;
-}
-
-static void link_add_to_outqueue(struct tipc_link *l_ptr,
-                                struct sk_buff *buf,
-                                struct tipc_msg *msg)
-{
-       u32 ack = mod(l_ptr->next_in_no - 1);
-       u32 seqno = mod(l_ptr->next_out_no++);
+       uint psz = msg_size(msg);
+       uint imp = tipc_msg_tot_importance(msg);
+       u32 oport = msg_tot_origport(msg);
 
-       msg_set_word(msg, 2, ((ack << 16) | seqno));
-       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       buf->next = NULL;
-       if (l_ptr->first_out) {
-               l_ptr->last_out->next = buf;
-               l_ptr->last_out = buf;
-       } else
-               l_ptr->first_out = l_ptr->last_out = buf;
-
-       l_ptr->out_queue_size++;
-       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-}
-
-static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
-                                      struct sk_buff *buf_chain,
-                                      u32 long_msgno)
-{
-       struct sk_buff *buf;
-       struct tipc_msg *msg;
-
-       if (!l_ptr->next_out)
-               l_ptr->next_out = buf_chain;
-       while (buf_chain) {
-               buf = buf_chain;
-               buf_chain = buf_chain->next;
-
-               msg = buf_msg(buf);
-               msg_set_long_msgno(msg, long_msgno);
-               link_add_to_outqueue(l_ptr, buf, msg);
+       if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) {
+               if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) {
+                       link_schedule_port(link, oport, psz);
+                       return -ELINKCONG;
+               }
+       } else {
+               pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+               tipc_link_reset(link);
        }
+       kfree_skb_list(buf);
+       return -EHOSTUNREACH;
 }
 
-/*
- * tipc_link_xmit() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_xmit
- * has failed, and from link_send()
+/**
+ * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
+ * @link: link to use
+ * @buf: chain of buffers containing message
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
+ * user data messages) or -EHOSTUNREACH (all other messages/senders)
+ * Only the socket functions tipc_send_stream() and tipc_send_packet() need
+ * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       u32 size = msg_size(msg);
-       u32 dsz = msg_data_sz(msg);
-       u32 queue_size = l_ptr->out_queue_size;
-       u32 imp = tipc_msg_tot_importance(msg);
-       u32 queue_limit = l_ptr->queue_limit[imp];
-       u32 max_packet = l_ptr->max_pkt;
-
-       /* Match msg importance against queue limits: */
-       if (unlikely(queue_size >= queue_limit)) {
-               if (imp <= TIPC_CRITICAL_IMPORTANCE) {
-                       link_schedule_port(l_ptr, msg_origport(msg), size);
-                       kfree_skb(buf);
-                       return -ELINKCONG;
-               }
-               kfree_skb(buf);
-               if (imp > CONN_MANAGER) {
-                       pr_warn("%s<%s>, send queue full", link_rst_msg,
-                               l_ptr->name);
-                       tipc_link_reset(l_ptr);
-               }
-               return dsz;
+       uint psz = msg_size(msg);
+       uint qsz = link->out_queue_size;
+       uint sndlim = link->queue_limit[0];
+       uint imp = tipc_msg_tot_importance(msg);
+       uint mtu = link->max_pkt;
+       uint ack = mod(link->next_in_no - 1);
+       uint seqno = link->next_out_no;
+       uint bc_last_in = link->owner->bclink.last_in;
+       struct tipc_media_addr *addr = &link->media_addr;
+       struct sk_buff *next = buf->next;
+
+       /* Match queue limits against msg importance: */
+       if (unlikely(qsz >= link->queue_limit[imp]))
+               return tipc_link_cong(link, buf);
+
+       /* Has valid packet limit been used ? */
+       if (unlikely(psz > mtu)) {
+               kfree_skb_list(buf);
+               return -EMSGSIZE;
        }
 
-       /* Fragmentation needed ? */
-       if (size > max_packet)
-               return tipc_link_frag_xmit(l_ptr, buf);
-
-       /* Packet can be queued or sent. */
-       if (likely(!link_congested(l_ptr))) {
-               link_add_to_outqueue(l_ptr, buf, msg);
+       /* Prepare each packet for sending, and add to outqueue: */
+       while (buf) {
+               next = buf->next;
+               msg = buf_msg(buf);
+               msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+               msg_set_bcast_ack(msg, bc_last_in);
+
+               if (!link->first_out) {
+                       link->first_out = buf;
+               } else if (qsz < sndlim) {
+                       link->last_out->next = buf;
+               } else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
+                       link->stats.sent_bundled++;
+                       buf = next;
+                       next = buf->next;
+                       continue;
+               } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
+                       link->stats.sent_bundled++;
+                       link->stats.sent_bundles++;
+                       link->last_out->next = buf;
+                       if (!link->next_out)
+                               link->next_out = buf;
+               } else {
+                       link->last_out->next = buf;
+                       if (!link->next_out)
+                               link->next_out = buf;
+               }
 
-               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
-               l_ptr->unacked_window = 0;
-               return dsz;
-       }
-       /* Congestion: can message be bundled ? */
-       if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
-           (msg_user(msg) != MSG_FRAGMENTER)) {
-
-               /* Try adding message to an existing bundle */
-               if (l_ptr->next_out &&
-                   link_bundle_buf(l_ptr, l_ptr->last_out, buf))
-                       return dsz;
-
-               /* Try creating a new bundle */
-               if (size <= max_packet * 2 / 3) {
-                       struct sk_buff *bundler = tipc_buf_acquire(max_packet);
-                       struct tipc_msg bundler_hdr;
-
-                       if (bundler) {
-                               tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
-                                        INT_H_SIZE, l_ptr->addr);
-                               skb_copy_to_linear_data(bundler, &bundler_hdr,
-                                                       INT_H_SIZE);
-                               skb_trim(bundler, INT_H_SIZE);
-                               link_bundle_buf(l_ptr, bundler, buf);
-                               buf = bundler;
-                               msg = buf_msg(buf);
-                               l_ptr->stats.sent_bundles++;
-                       }
+               /* Send packet if possible: */
+               if (likely(++qsz <= sndlim)) {
+                       tipc_bearer_send(link->bearer_id, buf, addr);
+                       link->next_out = next;
+                       link->unacked_window = 0;
                }
+               seqno++;
+               link->last_out = buf;
+               buf = next;
        }
-       if (!l_ptr->next_out)
-               l_ptr->next_out = buf;
-       link_add_to_outqueue(l_ptr, buf, msg);
-       return dsz;
+       link->next_out_no = seqno;
+       link->out_queue_size = qsz;
+       return 0;
 }
 
-/*
- * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
- * has not been selected yet, and the the owner node is not locked
- * Called by TIPC internal users, e.g. the name distributor
+/**
+ * tipc_link_xmit() is the general link level function for message sending
+ * @buf: chain of buffers containing message
+ * @dsz: amount of user data to be sent
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
 {
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
-       int res = -ELINKCONG;
+       struct tipc_link *link = NULL;
+       struct tipc_node *node;
+       int rc = -EHOSTUNREACH;
 
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[selector & 1];
-               if (l_ptr)
-                       res = __tipc_link_xmit(l_ptr, buf);
-               else
-                       kfree_skb(buf);
-               tipc_node_unlock(n_ptr);
-       } else {
-               kfree_skb(buf);
+       node = tipc_node_find(dnode);
+       if (node) {
+               tipc_node_lock(node);
+               link = node->active_links[selector & 1];
+               if (link)
+                       rc = __tipc_link_xmit(link, buf);
+               tipc_node_unlock(node);
        }
-       return res;
+
+       if (link)
+               return rc;
+
+       if (likely(in_own_node(dnode)))
+               return tipc_sk_rcv(buf);
+
+       kfree_skb_list(buf);
+       return rc;
 }
 
 /*
@@ -858,7 +824,7 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
  *
  * Called with node locked
  */
-static void tipc_link_sync_xmit(struct tipc_link *l)
+static void tipc_link_sync_xmit(struct tipc_link *link)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -868,10 +834,9 @@ static void tipc_link_sync_xmit(struct tipc_link *l)
                return;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
-       msg_set_last_bcast(msg, l->owner->bclink.acked);
-       link_add_chain_to_outqueue(l, buf, 0);
-       tipc_link_push_queue(l);
+       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
+       msg_set_last_bcast(msg, link->owner->bclink.acked);
+       __tipc_link_xmit(link, buf);
 }
 
 /*
@@ -891,293 +856,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
        kfree_skb(buf);
 }
 
-/*
- * tipc_link_names_xmit - send name table entries to new neighbor
- *
- * Send routine for bulk delivery of name table messages when contact
- * with a new neighbor occurs. No link congestion checking is performed
- * because name table messages *must* be delivered. The messages must be
- * small enough not to require fragmentation.
- * Called without any locks held.
- */
-void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
-{
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *buf;
-       struct sk_buff *temp_buf;
-
-       if (list_empty(message_list))
-               return;
-
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[0];
-               if (l_ptr) {
-                       /* convert circular list to linear list */
-                       ((struct sk_buff *)message_list->prev)->next = NULL;
-                       link_add_chain_to_outqueue(l_ptr,
-                               (struct sk_buff *)message_list->next, 0);
-                       tipc_link_push_queue(l_ptr);
-                       INIT_LIST_HEAD(message_list);
-               }
-               tipc_node_unlock(n_ptr);
-       }
-
-       /* discard the messages if they couldn't be sent */
-       list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
-               list_del((struct list_head *)buf);
-               kfree_skb(buf);
-       }
-}
-
-/*
- * tipc_link_xmit_fast: Entry for data messages where the
- * destination link is known and the header is complete,
- * inclusive total message length. Very time critical.
- * Link is locked. Returns user data length.
- */
-static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
-                              u32 *used_max_pkt)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       int res = msg_data_sz(msg);
-
-       if (likely(!link_congested(l_ptr))) {
-               if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
-                       link_add_to_outqueue(l_ptr, buf, msg);
-                       tipc_bearer_send(l_ptr->bearer_id, buf,
-                                        &l_ptr->media_addr);
-                       l_ptr->unacked_window = 0;
-                       return res;
-               }
-               else
-                       *used_max_pkt = l_ptr->max_pkt;
-       }
-       return __tipc_link_xmit(l_ptr, buf);  /* All other cases */
-}
-
-/*
- * tipc_link_iovec_xmit_fast: Entry for messages where the
- * destination processor is known and the header is complete,
- * except for total message length.
- * Returns user data length or errno.
- */
-int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
-                             struct iovec const *msg_sect,
-                             unsigned int len, u32 destaddr)
-{
-       struct tipc_msg *hdr = &sender->phdr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *buf;
-       struct tipc_node *node;
-       int res;
-       u32 selector = msg_origport(hdr) & 1;
-
-again:
-       /*
-        * Try building message using port's max_pkt hint.
-        * (Must not hold any locks while building message.)
-        */
-       res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
-       /* Exit if build request was invalid */
-       if (unlikely(res < 0))
-               return res;
-
-       node = tipc_node_find(destaddr);
-       if (likely(node)) {
-               tipc_node_lock(node);
-               l_ptr = node->active_links[selector];
-               if (likely(l_ptr)) {
-                       if (likely(buf)) {
-                               res = tipc_link_xmit_fast(l_ptr, buf,
-                                                         &sender->max_pkt);
-exit:
-                               tipc_node_unlock(node);
-                               return res;
-                       }
-
-                       /* Exit if link (or bearer) is congested */
-                       if (link_congested(l_ptr)) {
-                               res = link_schedule_port(l_ptr,
-                                                        sender->ref, res);
-                               goto exit;
-                       }
-
-                       /*
-                        * Message size exceeds max_pkt hint; update hint,
-                        * then re-try fast path or fragment the message
-                        */
-                       sender->max_pkt = l_ptr->max_pkt;
-                       tipc_node_unlock(node);
-
-
-                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
-                               goto again;
-
-                       return tipc_link_iovec_long_xmit(sender, msg_sect,
-                                                        len, destaddr);
-               }
-               tipc_node_unlock(node);
-       }
-
-       /* Couldn't find a link to the destination node */
-       kfree_skb(buf);
-       tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
-       return -ENETUNREACH;
-}
-
-/*
- * tipc_link_iovec_long_xmit(): Entry for long messages where the
- * destination node is known and the header is complete,
- * inclusive total message length.
- * Link and bearer congestion status have been checked to be ok,
- * and are ignored if they change.
- *
- * Note that fragments do not use the full link MTU so that they won't have
- * to undergo refragmentation if link changeover causes them to be sent
- * over another link with an additional tunnel header added as prefix.
- * (Refragmentation will still occur if the other link has a smaller MTU.)
- *
- * Returns user data length or errno.
- */
-static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
-                                    struct iovec const *msg_sect,
-                                    unsigned int len, u32 destaddr)
-{
-       struct tipc_link *l_ptr;
-       struct tipc_node *node;
-       struct tipc_msg *hdr = &sender->phdr;
-       u32 dsz = len;
-       u32 max_pkt, fragm_sz, rest;
-       struct tipc_msg fragm_hdr;
-       struct sk_buff *buf, *buf_chain, *prev;
-       u32 fragm_crs, fragm_rest, hsz, sect_rest;
-       const unchar __user *sect_crs;
-       int curr_sect;
-       u32 fragm_no;
-       int res = 0;
-
-again:
-       fragm_no = 1;
-       max_pkt = sender->max_pkt - INT_H_SIZE;
-               /* leave room for tunnel header in case of link changeover */
-       fragm_sz = max_pkt - INT_H_SIZE;
-               /* leave room for fragmentation header in each fragment */
-       rest = dsz;
-       fragm_crs = 0;
-       fragm_rest = 0;
-       sect_rest = 0;
-       sect_crs = NULL;
-       curr_sect = -1;
-
-       /* Prepare reusable fragment header */
-       tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                INT_H_SIZE, msg_destnode(hdr));
-       msg_set_size(&fragm_hdr, max_pkt);
-       msg_set_fragm_no(&fragm_hdr, 1);
-
-       /* Prepare header of first fragment */
-       buf_chain = buf = tipc_buf_acquire(max_pkt);
-       if (!buf)
-               return -ENOMEM;
-       buf->next = NULL;
-       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
-       hsz = msg_hdr_sz(hdr);
-       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
-
-       /* Chop up message */
-       fragm_crs = INT_H_SIZE + hsz;
-       fragm_rest = fragm_sz - hsz;
-
-       do {            /* For all sections */
-               u32 sz;
-
-               if (!sect_rest) {
-                       sect_rest = msg_sect[++curr_sect].iov_len;
-                       sect_crs = msg_sect[curr_sect].iov_base;
-               }
-
-               if (sect_rest < fragm_rest)
-                       sz = sect_rest;
-               else
-                       sz = fragm_rest;
-
-               if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
-                       res = -EFAULT;
-error:
-                       kfree_skb_list(buf_chain);
-                       return res;
-               }
-               sect_crs += sz;
-               sect_rest -= sz;
-               fragm_crs += sz;
-               fragm_rest -= sz;
-               rest -= sz;
-
-               if (!fragm_rest && rest) {
-
-                       /* Initiate new fragment: */
-                       if (rest <= fragm_sz) {
-                               fragm_sz = rest;
-                               msg_set_type(&fragm_hdr, LAST_FRAGMENT);
-                       } else {
-                               msg_set_type(&fragm_hdr, FRAGMENT);
-                       }
-                       msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
-                       msg_set_fragm_no(&fragm_hdr, ++fragm_no);
-                       prev = buf;
-                       buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
-                       if (!buf) {
-                               res = -ENOMEM;
-                               goto error;
-                       }
-
-                       buf->next = NULL;
-                       prev->next = buf;
-                       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
-                       fragm_crs = INT_H_SIZE;
-                       fragm_rest = fragm_sz;
-               }
-       } while (rest > 0);
-
-       /*
-        * Now we have a buffer chain. Select a link and check
-        * that packet size is still OK
-        */
-       node = tipc_node_find(destaddr);
-       if (likely(node)) {
-               tipc_node_lock(node);
-               l_ptr = node->active_links[sender->ref & 1];
-               if (!l_ptr) {
-                       tipc_node_unlock(node);
-                       goto reject;
-               }
-               if (l_ptr->max_pkt < max_pkt) {
-                       sender->max_pkt = l_ptr->max_pkt;
-                       tipc_node_unlock(node);
-                       kfree_skb_list(buf_chain);
-                       goto again;
-               }
-       } else {
-reject:
-               kfree_skb_list(buf_chain);
-               tipc_port_iovec_reject(sender, hdr, msg_sect, len,
-                                      TIPC_ERR_NO_NODE);
-               return -ENETUNREACH;
-       }
-
-       /* Append chain of fragments to send queue & send them */
-       l_ptr->long_msg_seq_no++;
-       link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
-       l_ptr->stats.sent_fragments += fragm_no;
-       l_ptr->stats.sent_fragmented++;
-       tipc_link_push_queue(l_ptr);
-       tipc_node_unlock(node);
-       return dsz;
-}
-
 /*
  * tipc_link_push_packet: Push one unsent packet to the media
  */
@@ -1238,7 +916,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
                        tipc_bearer_send(l_ptr->bearer_id, buf,
                                         &l_ptr->media_addr);
                        if (msg_user(msg) == MSG_BUNDLER)
-                               msg_set_type(msg, CLOSED_MSG);
+                               msg_set_type(msg, BUNDLE_CLOSED);
                        l_ptr->next_out = buf->next;
                        return 0;
                }
@@ -1527,11 +1205,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(!list_empty(&l_ptr->waiting_ports)))
                        tipc_link_wakeup_ports(l_ptr, 0);
 
-               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
-                       l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-               }
-
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
@@ -1565,57 +1238,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(l_ptr->oldest_deferred_in))
                        head = link_insert_deferred_queue(l_ptr, head);
 
-               /* Deliver packet/message to correct user: */
-               if (unlikely(msg_user(msg) ==  CHANGEOVER_PROTOCOL)) {
-                       if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
-                       msg = buf_msg(buf);
-               } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       l_ptr->stats.recv_fragments++;
-                       if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
-                               l_ptr->stats.recv_fragmented++;
-                               msg = buf_msg(buf);
-                       } else {
-                               if (!l_ptr->reasm_buf)
-                                       tipc_link_reset(l_ptr);
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
+               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+                       l_ptr->stats.sent_acks++;
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               switch (msg_user(msg)) {
-               case TIPC_LOW_IMPORTANCE:
-               case TIPC_MEDIUM_IMPORTANCE:
-               case TIPC_HIGH_IMPORTANCE:
-               case TIPC_CRITICAL_IMPORTANCE:
+               if (tipc_link_prepare_input(l_ptr, &buf)) {
                        tipc_node_unlock(n_ptr);
-                       tipc_sk_rcv(buf);
                        continue;
-               case MSG_BUNDLER:
-                       l_ptr->stats.recv_bundles++;
-                       l_ptr->stats.recv_bundled += msg_msgcnt(msg);
-                       tipc_node_unlock(n_ptr);
-                       tipc_link_bundle_rcv(buf);
-                       continue;
-               case NAME_DISTRIBUTOR:
-                       n_ptr->bclink.recv_permitted = true;
-                       tipc_node_unlock(n_ptr);
-                       tipc_named_rcv(buf);
-                       continue;
-               case CONN_MANAGER:
-                       tipc_node_unlock(n_ptr);
-                       tipc_port_proto_rcv(buf);
-                       continue;
-               case BCAST_PROTOCOL:
-                       tipc_link_sync_rcv(n_ptr, buf);
-                       break;
-               default:
-                       kfree_skb(buf);
-                       break;
                }
                tipc_node_unlock(n_ptr);
+               msg = buf_msg(buf);
+               if (tipc_link_input(l_ptr, buf) != 0)
+                       goto discard;
                continue;
 unlock_discard:
                tipc_node_unlock(n_ptr);
@@ -1624,6 +1259,80 @@ discard:
        }
 }
 
+/**
+ * tipc_link_prepare_input - process TIPC link messages
+ *
+ * returns nonzero if the message was consumed
+ *
+ * Node lock must be held
+ */
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+{
+       struct tipc_node *n;
+       struct tipc_msg *msg;
+       int res = -EINVAL;
+
+       n = l->owner;
+       msg = buf_msg(*buf);
+       switch (msg_user(msg)) {
+       case CHANGEOVER_PROTOCOL:
+               if (tipc_link_tunnel_rcv(n, buf))
+                       res = 0;
+               break;
+       case MSG_FRAGMENTER:
+               l->stats.recv_fragments++;
+               if (tipc_buf_append(&l->reasm_buf, buf)) {
+                       l->stats.recv_fragmented++;
+                       res = 0;
+               } else if (!l->reasm_buf) {
+                       tipc_link_reset(l);
+               }
+               break;
+       case MSG_BUNDLER:
+               l->stats.recv_bundles++;
+               l->stats.recv_bundled += msg_msgcnt(msg);
+               res = 0;
+               break;
+       case NAME_DISTRIBUTOR:
+               n->bclink.recv_permitted = true;
+               res = 0;
+               break;
+       case BCAST_PROTOCOL:
+               tipc_link_sync_rcv(n, *buf);
+               break;
+       default:
+               res = 0;
+       }
+       return res;
+}
+/**
+ * tipc_link_input - Deliver message to higher layers
+ */
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       int res = 0;
+
+       switch (msg_user(msg)) {
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+       case CONN_MANAGER:
+               tipc_sk_rcv(buf);
+               break;
+       case NAME_DISTRIBUTOR:
+               tipc_named_rcv(buf);
+               break;
+       case MSG_BUNDLER:
+               tipc_link_bundle_rcv(buf);
+               break;
+       default:
+               res = -EINVAL;
+       }
+       return res;
+}
+
 /**
  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
  *
@@ -2217,6 +1926,7 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
        u32 msgcount = msg_msgcnt(buf_msg(buf));
        u32 pos = INT_H_SIZE;
        struct sk_buff *obuf;
+       struct tipc_msg *omsg;
 
        while (msgcount--) {
                obuf = buf_extract(buf, pos);
@@ -2224,82 +1934,18 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
                        pr_warn("Link unable to unbundle message(s)\n");
                        break;
                }
-               pos += align(msg_size(buf_msg(obuf)));
-               tipc_net_route_msg(obuf);
-       }
-       kfree_skb(buf);
-}
-
-/*
- *  Fragmentation/defragmentation:
- */
-
-/*
- * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
- * The buffer is complete, inclusive total message length.
- * Returns user data length.
- */
-static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
-{
-       struct sk_buff *buf_chain = NULL;
-       struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
-       struct tipc_msg *inmsg = buf_msg(buf);
-       struct tipc_msg fragm_hdr;
-       u32 insize = msg_size(inmsg);
-       u32 dsz = msg_data_sz(inmsg);
-       unchar *crs = buf->data;
-       u32 rest = insize;
-       u32 pack_sz = l_ptr->max_pkt;
-       u32 fragm_sz = pack_sz - INT_H_SIZE;
-       u32 fragm_no = 0;
-       u32 destaddr;
-
-       if (msg_short(inmsg))
-               destaddr = l_ptr->addr;
-       else
-               destaddr = msg_destnode(inmsg);
-
-       /* Prepare reusable fragment header: */
-       tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                INT_H_SIZE, destaddr);
-
-       /* Chop up message: */
-       while (rest > 0) {
-               struct sk_buff *fragm;
-
-               if (rest <= fragm_sz) {
-                       fragm_sz = rest;
-                       msg_set_type(&fragm_hdr, LAST_FRAGMENT);
-               }
-               fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
-               if (fragm == NULL) {
-                       kfree_skb(buf);
-                       kfree_skb_list(buf_chain);
-                       return -ENOMEM;
+               omsg = buf_msg(obuf);
+               pos += align(msg_size(omsg));
+               if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) {
+                       tipc_sk_rcv(obuf);
+               } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
+                       tipc_named_rcv(obuf);
+               } else {
+                       pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
+                       kfree_skb(obuf);
                }
-               msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
-               fragm_no++;
-               msg_set_fragm_no(&fragm_hdr, fragm_no);
-               skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
-                                              fragm_sz);
-               buf_chain_tail->next = fragm;
-               buf_chain_tail = fragm;
-
-               rest -= fragm_sz;
-               crs += fragm_sz;
-               msg_set_type(&fragm_hdr, FRAGMENT);
        }
        kfree_skb(buf);
-
-       /* Append chain of fragments to send queue & send them */
-       l_ptr->long_msg_seq_no++;
-       link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
-       l_ptr->stats.sent_fragments += fragm_no;
-       l_ptr->stats.sent_fragmented++;
-       tipc_link_push_queue(l_ptr);
-
-       return dsz;
 }
 
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
index 200d518b218ede4e0ad7c210c863c546f2362947..782983ccd323a8f4df659765f76888ec3faebc7a 100644 (file)
@@ -227,13 +227,8 @@ void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
 int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
-void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
-int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
-int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
-                             struct iovec const *msg_sect,
-                             unsigned int len, u32 destnode);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
index 0a37a472c29f9a6b51eaa00cda09526418e5d6ed..b6f45d029933cbfd328be776c5ea74d0b05ee017 100644 (file)
 
 #include "core.h"
 #include "msg.h"
+#include "addr.h"
+#include "name_table.h"
 
-u32 tipc_msg_tot_importance(struct tipc_msg *m)
+#define MAX_FORWARD_SIZE 1024
+
+static unsigned int align(unsigned int i)
 {
-       if (likely(msg_isdata(m))) {
-               if (likely(msg_orignode(m) == tipc_own_addr))
-                       return msg_importance(m);
-               return msg_importance(m) + 4;
-       }
-       if ((msg_user(m) == MSG_FRAGMENTER)  &&
-           (msg_type(m) == FIRST_FRAGMENT))
-               return msg_importance(msg_get_wrapped(m));
-       return msg_importance(m);
+       return (i + 3) & ~3u;
 }
 
-
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode)
 {
@@ -65,41 +60,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
        msg_set_destnode(m, destnode);
 }
 
-/**
- * tipc_msg_build - create message using specified header and data
- *
- * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
- *
- * Returns message data size or errno
- */
-int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  unsigned int len, int max_size, struct sk_buff **buf)
-{
-       int dsz, sz, hsz;
-       unsigned char *to;
-
-       dsz = len;
-       hsz = msg_hdr_sz(hdr);
-       sz = hsz + dsz;
-       msg_set_size(hdr, sz);
-       if (unlikely(sz > max_size)) {
-               *buf = NULL;
-               return dsz;
-       }
-
-       *buf = tipc_buf_acquire(sz);
-       if (!(*buf))
-               return -ENOMEM;
-       skb_copy_to_linear_data(*buf, hdr, hsz);
-       to = (*buf)->data + hsz;
-       if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
-               kfree_skb(*buf);
-               *buf = NULL;
-               return -EFAULT;
-       }
-       return dsz;
-}
-
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
  * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
  *            out: set when successful non-complete reassembly, otherwise NULL
@@ -157,3 +117,303 @@ out_free:
        *buf = *headbuf = NULL;
        return 0;
 }
+
+
+/**
+ * tipc_msg_build - create buffer chain containing specified header and data
+ * @mhdr: Message header, to be prepended to data
+ * @iov: User data
+ * @offset: Position in iov to start copying from
+ * @dsz: Total length of user data
+ * @pktmax: Max packet size that can be used
+ * @chain: Buffer or chain of buffers to be returned to caller
+ * Returns message data size or errno: -ENOMEM, -EFAULT
+ */
+int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
+                  int offset, int dsz, int pktmax , struct sk_buff **chain)
+{
+       int mhsz = msg_hdr_sz(mhdr);
+       int msz = mhsz + dsz;
+       int pktno = 1;
+       int pktsz;
+       int pktrem = pktmax;
+       int drem = dsz;
+       struct tipc_msg pkthdr;
+       struct sk_buff *buf, *prev;
+       char *pktpos;
+       int rc;
+
+       msg_set_size(mhdr, msz);
+
+       /* No fragmentation needed? */
+       if (likely(msz <= pktmax)) {
+               buf = tipc_buf_acquire(msz);
+               *chain = buf;
+               if (unlikely(!buf))
+                       return -ENOMEM;
+               skb_copy_to_linear_data(buf, mhdr, mhsz);
+               pktpos = buf->data + mhsz;
+               if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
+                       return dsz;
+               rc = -EFAULT;
+               goto error;
+       }
+
+       /* Prepare reusable fragment header */
+       tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+                     INT_H_SIZE, msg_destnode(mhdr));
+       msg_set_size(&pkthdr, pktmax);
+       msg_set_fragm_no(&pkthdr, pktno);
+
+       /* Prepare first fragment */
+       *chain = buf = tipc_buf_acquire(pktmax);
+       if (!buf)
+               return -ENOMEM;
+       pktpos = buf->data;
+       skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+       pktpos += INT_H_SIZE;
+       pktrem -= INT_H_SIZE;
+       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+       pktpos += mhsz;
+       pktrem -= mhsz;
+
+       do {
+               if (drem < pktrem)
+                       pktrem = drem;
+
+               if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
+                       rc = -EFAULT;
+                       goto error;
+               }
+               drem -= pktrem;
+               offset += pktrem;
+
+               if (!drem)
+                       break;
+
+               /* Prepare new fragment: */
+               if (drem < (pktmax - INT_H_SIZE))
+                       pktsz = drem + INT_H_SIZE;
+               else
+                       pktsz = pktmax;
+               prev = buf;
+               buf = tipc_buf_acquire(pktsz);
+               if (!buf) {
+                       rc = -ENOMEM;
+                       goto error;
+               }
+               prev->next = buf;
+               msg_set_type(&pkthdr, FRAGMENT);
+               msg_set_size(&pkthdr, pktsz);
+               msg_set_fragm_no(&pkthdr, ++pktno);
+               skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+               pktpos = buf->data + INT_H_SIZE;
+               pktrem = pktsz - INT_H_SIZE;
+
+       } while (1);
+
+       msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+       return dsz;
+error:
+       kfree_skb_list(*chain);
+       *chain = NULL;
+       return rc;
+}
+
+/**
+ * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
+ * @bbuf: the existing buffer ("bundle")
+ * @buf:  buffer to be appended
+ * @mtu:  max allowable size for the bundle buffer
+ * Consumes buffer if successful
+ * Returns true if bundling could be performed, otherwise false
+ */
+bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
+{
+       struct tipc_msg *bmsg = buf_msg(bbuf);
+       struct tipc_msg *msg = buf_msg(buf);
+       unsigned int bsz = msg_size(bmsg);
+       unsigned int msz = msg_size(msg);
+       u32 start = align(bsz);
+       u32 max = mtu - INT_H_SIZE;
+       u32 pad = start - bsz;
+
+       if (likely(msg_user(msg) == MSG_FRAGMENTER))
+               return false;
+       if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+               return false;
+       if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+               return false;
+       if (likely(msg_user(bmsg) != MSG_BUNDLER))
+               return false;
+       if (likely(msg_type(bmsg) != BUNDLE_OPEN))
+               return false;
+       if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
+               return false;
+       if (unlikely(max < (start + msz)))
+               return false;
+
+       skb_put(bbuf, pad + msz);
+       skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
+       msg_set_size(bmsg, start + msz);
+       msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+       bbuf->next = buf->next;
+       kfree_skb(buf);
+       return true;
+}
+
+/**
+ * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
+ * @buf:  buffer to be appended and replaced
+ * @mtu:  max allowable size for the bundle buffer, inclusive header
+ * @dnode: destination node for message. (Not always present in header)
+ * Replaces buffer if successful
+ * Returns true if successful, otherwise false
+ */
+bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
+{
+       struct sk_buff *bbuf;
+       struct tipc_msg *bmsg;
+       struct tipc_msg *msg = buf_msg(*buf);
+       u32 msz = msg_size(msg);
+       u32 max = mtu - INT_H_SIZE;
+
+       if (msg_user(msg) == MSG_FRAGMENTER)
+               return false;
+       if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+               return false;
+       if (msg_user(msg) == BCAST_PROTOCOL)
+               return false;
+       if (msz > (max / 2))
+               return false;
+
+       bbuf = tipc_buf_acquire(max);
+       if (!bbuf)
+               return false;
+
+       skb_trim(bbuf, INT_H_SIZE);
+       bmsg = buf_msg(bbuf);
+       tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
+       msg_set_seqno(bmsg, msg_seqno(msg));
+       msg_set_ack(bmsg, msg_ack(msg));
+       msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+       bbuf->next = (*buf)->next;
+       tipc_msg_bundle(bbuf, *buf, mtu);
+       *buf = bbuf;
+       return true;
+}
+
+/**
+ * tipc_msg_reverse(): swap source and destination addresses and add error code
+ * @buf:  buffer containing message to be reversed
+ * @dnode: return value: node where to send message after reversal
+ * @err:  error code to be set in message
+ * Consumes buffer on failure
+ * Returns true if success, otherwise false
+ */
+bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       uint imp = msg_importance(msg);
+       struct tipc_msg ohdr;
+       uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+
+       if (skb_linearize(buf))
+               goto exit;
+       if (msg_dest_droppable(msg))
+               goto exit;
+       if (msg_errcode(msg))
+               goto exit;
+
+       memcpy(&ohdr, msg, msg_hdr_sz(msg));
+       imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
+       if (msg_isdata(msg))
+               msg_set_importance(msg, imp);
+       msg_set_errcode(msg, err);
+       msg_set_origport(msg, msg_destport(&ohdr));
+       msg_set_destport(msg, msg_origport(&ohdr));
+       msg_set_prevnode(msg, tipc_own_addr);
+       if (!msg_short(msg)) {
+               msg_set_orignode(msg, msg_destnode(&ohdr));
+               msg_set_destnode(msg, msg_orignode(&ohdr));
+       }
+       msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
+       skb_trim(buf, msg_size(msg));
+       skb_orphan(buf);
+       *dnode = msg_orignode(&ohdr);
+       return true;
+exit:
+       kfree_skb(buf);
+       return false;
+}
+
+/**
+ * tipc_msg_eval: determine fate of message that found no destination
+ * @buf: the buffer containing the message.
+ * @dnode: return value: next-hop node, if message to be forwarded
+ * @err: error code to use, if message to be rejected
+ *
+ * Does not consume buffer
+ * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
+ * code if message to be rejected
+ */
+int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 dport;
+
+       if (msg_type(msg) != TIPC_NAMED_MSG)
+               return -TIPC_ERR_NO_PORT;
+       if (skb_linearize(buf))
+               return -TIPC_ERR_NO_NAME;
+       if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
+               return -TIPC_ERR_NO_NAME;
+       if (msg_reroute_cnt(msg) > 0)
+               return -TIPC_ERR_NO_NAME;
+
+       *dnode = addr_domain(msg_lookup_scope(msg));
+       dport = tipc_nametbl_translate(msg_nametype(msg),
+                                      msg_nameinst(msg),
+                                      dnode);
+       if (!dport)
+               return -TIPC_ERR_NO_NAME;
+       msg_incr_reroute_cnt(msg);
+       msg_set_destnode(msg, *dnode);
+       msg_set_destport(msg, dport);
+       return TIPC_OK;
+}
+
+/* tipc_msg_reassemble() - clone a buffer chain of fragments and
+ *                         reassemble the clones into one message
+ */
+struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+{
+       struct sk_buff *buf = chain;
+       struct sk_buff *frag = buf;
+       struct sk_buff *head = NULL;
+       int hdr_sz;
+
+       /* Copy header if single buffer */
+       if (!buf->next) {
+               hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
+               return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+       }
+
+       /* Clone all fragments and reassemble */
+       while (buf) {
+               frag = skb_clone(buf, GFP_ATOMIC);
+               if (!frag)
+                       goto error;
+               frag->next = NULL;
+               if (tipc_buf_append(&head, &frag))
+                       break;
+               if (!head)
+                       goto error;
+               buf = buf->next;
+       }
+       return frag;
+error:
+       pr_warn("Failed do clone local mcast rcv buffer\n");
+       kfree_skb(head);
+       return NULL;
+}
index 503511903d1d25c9c4106f669da0a7def7c9dd46..462fa194a6afe5e2f6f759d4a04846d6e26ef32e 100644 (file)
@@ -463,6 +463,11 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 #define FRAGMENT               1
 #define LAST_FRAGMENT          2
 
+/* Bundling protocol message types
+ */
+#define BUNDLE_OPEN             0
+#define BUNDLE_CLOSED           1
+
 /*
  * Link management protocol message types
  */
@@ -706,12 +711,36 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
-u32 tipc_msg_tot_importance(struct tipc_msg *m);
+static inline u32 tipc_msg_tot_importance(struct tipc_msg *m)
+{
+       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
+               return msg_importance(msg_get_wrapped(m));
+       return msg_importance(m);
+}
+
+static inline u32 msg_tot_origport(struct tipc_msg *m)
+{
+       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
+               return msg_origport(msg_get_wrapped(m));
+       return msg_origport(m);
+}
+
+bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err);
+
+int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
+
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
-int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  unsigned int len, int max_size, struct sk_buff **buf);
 
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
 
+bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
+
+bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode);
+
+int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
+                  int offset, int dsz, int mtu , struct sk_buff **chain);
+
+struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
+
 #endif
index 8ce730984aa1f0d429b2325d58862e1ef7799d4d..dcc15bcd569279a96b74cc052ee140e6f8e1bf3b 100644 (file)
@@ -101,24 +101,22 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
 
 void named_cluster_distribute(struct sk_buff *buf)
 {
-       struct sk_buff *buf_copy;
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
+       struct sk_buff *obuf;
+       struct tipc_node *node;
+       u32 dnode;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[n_ptr->addr & 1];
-               if (l_ptr) {
-                       buf_copy = skb_copy(buf, GFP_ATOMIC);
-                       if (!buf_copy) {
-                               tipc_node_unlock(n_ptr);
-                               break;
-                       }
-                       msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
-                       __tipc_link_xmit(l_ptr, buf_copy);
-               }
-               tipc_node_unlock(n_ptr);
+       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+               dnode = node->addr;
+               if (in_own_node(dnode))
+                       continue;
+               if (!tipc_node_active_links(node))
+                       continue;
+               obuf = skb_copy(buf, GFP_ATOMIC);
+               if (!obuf)
+                       break;
+               msg_set_destnode(buf_msg(obuf), dnode);
+               tipc_link_xmit(obuf, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -175,34 +173,44 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
        return buf;
 }
 
-/*
+/**
  * named_distribute - prepare name info for bulk distribution to another node
+ * @msg_list: list of messages (buffers) to be returned from this function
+ * @dnode: node to be updated
+ * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct list_head *message_list, u32 node,
-                            struct publ_list *pls, u32 max_item_buf)
+static void named_distribute(struct list_head *msg_list, u32 dnode,
+                            struct publ_list *pls)
 {
        struct publication *publ;
        struct sk_buff *buf = NULL;
        struct distr_item *item = NULL;
-       u32 left = 0;
-       u32 rest = pls->size * ITEM_SIZE;
+       uint dsz = pls->size * ITEM_SIZE;
+       uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
+       uint rem = dsz;
+       uint msg_rem = 0;
 
        list_for_each_entry(publ, &pls->list, local_list) {
+               /* Prepare next buffer: */
                if (!buf) {
-                       left = (rest <= max_item_buf) ? rest : max_item_buf;
-                       rest -= left;
-                       buf = named_prepare_buf(PUBLICATION, left, node);
+                       msg_rem = min_t(uint, rem, msg_dsz);
+                       rem -= msg_rem;
+                       buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
                        if (!buf) {
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
                        item = (struct distr_item *)msg_data(buf_msg(buf));
                }
+
+               /* Pack publication into message: */
                publ_to_item(item, publ);
                item++;
-               left -= ITEM_SIZE;
-               if (!left) {
-                       list_add_tail((struct list_head *)buf, message_list);
+               msg_rem -= ITEM_SIZE;
+
+               /* Append full buffer to list: */
+               if (!msg_rem) {
+                       list_add_tail((struct list_head *)buf, msg_list);
                        buf = NULL;
                }
        }
@@ -211,16 +219,20 @@ static void named_distribute(struct list_head *message_list, u32 node,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(u32 max_item_buf, u32 node)
+void tipc_named_node_up(u32 dnode)
 {
-       LIST_HEAD(message_list);
+       LIST_HEAD(msg_list);
+       struct sk_buff *buf_chain;
 
        read_lock_bh(&tipc_nametbl_lock);
-       named_distribute(&message_list, node, &publ_cluster, max_item_buf);
-       named_distribute(&message_list, node, &publ_zone, max_item_buf);
+       named_distribute(&msg_list, dnode, &publ_cluster);
+       named_distribute(&msg_list, dnode, &publ_zone);
        read_unlock_bh(&tipc_nametbl_lock);
 
-       tipc_link_names_xmit(&message_list, node);
+       /* Convert circular list to linear list and send: */
+       buf_chain = (struct sk_buff *)msg_list.next;
+       ((struct sk_buff *)msg_list.prev)->next = NULL;
+       tipc_link_xmit(buf_chain, dnode, dnode);
 }
 
 /**
index b2eed4ec1526a34efd8b191e4b952bf9718f7a2e..8afe32b7fc9a0b6a2b9f78657fd13756eae0b465 100644 (file)
@@ -70,7 +70,7 @@ struct distr_item {
 struct sk_buff *tipc_named_publish(struct publication *publ);
 struct sk_buff *tipc_named_withdraw(struct publication *publ);
 void named_cluster_distribute(struct sk_buff *buf);
-void tipc_named_node_up(u32 max_item_buf, u32 node);
+void tipc_named_node_up(u32 dnode);
 void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 
index f64375e7f99fa4081ce2a30629071ca3e546aa05..7fcc94998feae6eb1427dd25ac965fece340227a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/net.c: TIPC network routing code
  *
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-static void net_route_named_msg(struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 dnode;
-       u32 dport;
-
-       if (!msg_named(msg)) {
-               kfree_skb(buf);
-               return;
-       }
-
-       dnode = addr_domain(msg_lookup_scope(msg));
-       dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
-       if (dport) {
-               msg_set_destnode(msg, dnode);
-               msg_set_destport(msg, dport);
-               tipc_net_route_msg(buf);
-               return;
-       }
-       tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
-}
-
-void tipc_net_route_msg(struct sk_buff *buf)
-{
-       struct tipc_msg *msg;
-       u32 dnode;
-
-       if (!buf)
-               return;
-       msg = buf_msg(buf);
-
-       /* Handle message for this node */
-       dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
-       if (tipc_in_scope(dnode, tipc_own_addr)) {
-               if (msg_isdata(msg)) {
-                       if (msg_mcast(msg))
-                               tipc_port_mcast_rcv(buf, NULL);
-                       else if (msg_destport(msg))
-                               tipc_sk_rcv(buf);
-                       else
-                               net_route_named_msg(buf);
-                       return;
-               }
-               switch (msg_user(msg)) {
-               case NAME_DISTRIBUTOR:
-                       tipc_named_rcv(buf);
-                       break;
-               case CONN_MANAGER:
-                       tipc_port_proto_rcv(buf);
-                       break;
-               default:
-                       kfree_skb(buf);
-               }
-               return;
-       }
-
-       /* Handle message for another node */
-       skb_trim(buf, msg_size(msg));
-       tipc_link_xmit(buf, dnode, msg_link_selector(msg));
-}
-
 int tipc_net_start(u32 addr)
 {
        char addr_string[16];
index c6c2b46f7c283095c4e29c7e0b11c5cdea2bcc01..59ef3388be2ce151320503d46a3f6f766375acee 100644 (file)
@@ -37,8 +37,6 @@
 #ifndef _TIPC_NET_H
 #define _TIPC_NET_H
 
-void tipc_net_route_msg(struct sk_buff *buf);
-
 int tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
index 5b44c3041be431955094de87f1815122deeea369..f7069299943f847f7853a3bb9e5cf781e6caf9b8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012 Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -155,21 +155,25 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        if (!active[0]) {
                active[0] = active[1] = l_ptr;
                node_established_contact(n_ptr);
-               return;
+               goto exit;
        }
        if (l_ptr->priority < active[0]->priority) {
                pr_info("New link <%s> becomes standby\n", l_ptr->name);
-               return;
+               goto exit;
        }
        tipc_link_dup_queue_xmit(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
-               return;
+               goto exit;
        }
        pr_info("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
                pr_info("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
+exit:
+       /* Leave room for changeover header when returning 'mtu' to users: */
+       n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
+       n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
 }
 
 /**
@@ -229,6 +233,19 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
                tipc_link_failover_send_queue(l_ptr);
        else
                node_lost_contact(n_ptr);
+
+       /* Leave room for changeover header when returning 'mtu' to users: */
+       if (active[0]) {
+               n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
+               n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+               return;
+       }
+
+       /* Loopback link went down? No fragmentation needed from now on. */
+       if (n_ptr->addr == tipc_own_addr) {
+               n_ptr->act_mtus[0] = MAX_MSG_SIZE;
+               n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+       }
 }
 
 int tipc_node_active_links(struct tipc_node *n_ptr)
@@ -457,8 +474,6 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 void tipc_node_unlock(struct tipc_node *node)
 {
        LIST_HEAD(nsub_list);
-       struct tipc_link *link;
-       int pkt_sz = 0;
        u32 addr = 0;
 
        if (likely(!node->action_flags)) {
@@ -471,18 +486,13 @@ void tipc_node_unlock(struct tipc_node *node)
                node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
        }
        if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
-               link = node->active_links[0];
                node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
-               if (link) {
-                       pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
-                                 ITEM_SIZE;
-                       addr = node->addr;
-               }
+               addr = node->addr;
        }
        spin_unlock_bh(&node->lock);
 
        if (!list_empty(&nsub_list))
                tipc_nodesub_notify(&nsub_list);
-       if (pkt_sz)
-               tipc_named_node_up(pkt_sz, addr);
+       if (addr)
+               tipc_named_node_up(addr);
 }
index 9087063793f26eb352f9b848a14b27545c8b02be..b61716a8218e583b6a01f711cfb3a7de950d3569 100644 (file)
@@ -41,6 +41,7 @@
 #include "addr.h"
 #include "net.h"
 #include "bearer.h"
+#include "msg.h"
 
 /*
  * Out-of-range value for node signature
@@ -105,6 +106,7 @@ struct tipc_node {
        spinlock_t lock;
        struct hlist_node hash;
        struct tipc_link *active_links[2];
+       u32 act_mtus[2];
        struct tipc_link *links[MAX_BEARERS];
        unsigned int action_flags;
        struct tipc_node_bclink bclink;
@@ -143,4 +145,19 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
                TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
+static inline uint tipc_node_get_mtu(u32 addr, u32 selector)
+{
+       struct tipc_node *node;
+       u32 mtu;
+
+       node = tipc_node_find(addr);
+
+       if (likely(node))
+               mtu = node->act_mtus[selector & 1];
+       else
+               mtu = MAX_MSG_SIZE;
+
+       return mtu;
+}
+
 #endif
index 7c59ab1d6ecb3dc26c4efb77cd243a00341c7b5b..2d13eea8574a9a3b74f2959d35031183686bc44f 100644 (file)
@@ -84,11 +84,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
 void tipc_nodesub_notify(struct list_head *nsub_list)
 {
        struct tipc_node_subscr *ns, *safe;
+       net_ev_handler handle_node_down;
 
        list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
-               if (ns->handle_node_down) {
-                       ns->handle_node_down(ns->usr_handle);
+               handle_node_down = ns->handle_node_down;
+               if (handle_node_down) {
                        ns->handle_node_down = NULL;
+                       handle_node_down(ns->usr_handle);
                }
        }
 }
index 5fd7acce01ea339b7ffe2873956e9513eb40bb49..7e096a5e770156631e701c91fc8c5ac4a34c5ce9 100644 (file)
@@ -42,8 +42,6 @@
 
 /* Connection management: */
 #define PROBING_INTERVAL 3600000       /* [ms] => 1 h */
-#define CONFIRMED 0
-#define PROBING 1
 
 #define MAX_REJECT_SIZE 1024
 
@@ -76,124 +74,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
                (!peernode && (orignode == tipc_own_addr));
 }
 
-/**
- * tipc_port_mcast_xmit - send a multicast message to local and remote
- * destinations
- */
-int tipc_port_mcast_xmit(struct tipc_port *oport,
-                        struct tipc_name_seq const *seq,
-                        struct iovec const *msg_sect,
-                        unsigned int len)
-{
-       struct tipc_msg *hdr;
-       struct sk_buff *buf;
-       struct sk_buff *ibuf = NULL;
-       struct tipc_port_list dports = {0, NULL, };
-       int ext_targets;
-       int res;
-
-       /* Create multicast message */
-       hdr = &oport->phdr;
-       msg_set_type(hdr, TIPC_MCAST_MSG);
-       msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
-       msg_set_destport(hdr, 0);
-       msg_set_destnode(hdr, 0);
-       msg_set_nametype(hdr, seq->type);
-       msg_set_namelower(hdr, seq->lower);
-       msg_set_nameupper(hdr, seq->upper);
-       msg_set_hdr_sz(hdr, MCAST_H_SIZE);
-       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (unlikely(!buf))
-               return res;
-
-       /* Figure out where to send multicast message */
-       ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
-                                               TIPC_NODE_SCOPE, &dports);
-
-       /* Send message to destinations (duplicate it only if necessary) */
-       if (ext_targets) {
-               if (dports.count != 0) {
-                       ibuf = skb_copy(buf, GFP_ATOMIC);
-                       if (ibuf == NULL) {
-                               tipc_port_list_free(&dports);
-                               kfree_skb(buf);
-                               return -ENOMEM;
-                       }
-               }
-               res = tipc_bclink_xmit(buf);
-               if ((res < 0) && (dports.count != 0))
-                       kfree_skb(ibuf);
-       } else {
-               ibuf = buf;
-       }
-
-       if (res >= 0) {
-               if (ibuf)
-                       tipc_port_mcast_rcv(ibuf, &dports);
-       } else {
-               tipc_port_list_free(&dports);
-       }
-       return res;
-}
-
-/**
- * tipc_port_mcast_rcv - deliver multicast message to all destination ports
- *
- * If there is no port list, perform a lookup to create one
- */
-void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
-{
-       struct tipc_msg *msg;
-       struct tipc_port_list dports = {0, NULL, };
-       struct tipc_port_list *item = dp;
-       int cnt = 0;
-
-       msg = buf_msg(buf);
-
-       /* Create destination port list, if one wasn't supplied */
-       if (dp == NULL) {
-               tipc_nametbl_mc_translate(msg_nametype(msg),
-                                    msg_namelower(msg),
-                                    msg_nameupper(msg),
-                                    TIPC_CLUSTER_SCOPE,
-                                    &dports);
-               item = dp = &dports;
-       }
-
-       /* Deliver a copy of message to each destination port */
-       if (dp->count != 0) {
-               msg_set_destnode(msg, tipc_own_addr);
-               if (dp->count == 1) {
-                       msg_set_destport(msg, dp->ports[0]);
-                       tipc_sk_rcv(buf);
-                       tipc_port_list_free(dp);
-                       return;
-               }
-               for (; cnt < dp->count; cnt++) {
-                       int index = cnt % PLSIZE;
-                       struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
-
-                       if (b == NULL) {
-                               pr_warn("Unable to deliver multicast message(s)\n");
-                               goto exit;
-                       }
-                       if ((index == 0) && (cnt != 0))
-                               item = item->next;
-                       msg_set_destport(buf_msg(b), item->ports[index]);
-                       tipc_sk_rcv(b);
-               }
-       }
-exit:
-       kfree_skb(buf);
-       tipc_port_list_free(dp);
-}
-
-
-void tipc_port_wakeup(struct tipc_port *port)
-{
-       tipc_sock_wakeup(tipc_port_to_sock(port));
-}
-
 /* tipc_port_init - intiate TIPC port and lock it
  *
  * Returns obtained reference if initialization is successful, zero otherwise
@@ -235,6 +115,8 @@ u32 tipc_port_init(struct tipc_port *p_ptr,
 void tipc_port_destroy(struct tipc_port *p_ptr)
 {
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
+       u32 peer;
 
        tipc_withdraw(p_ptr, 0, NULL);
 
@@ -246,14 +128,15 @@ void tipc_port_destroy(struct tipc_port *p_ptr)
        if (p_ptr->connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
+               msg = buf_msg(buf);
+               peer = msg_destnode(msg);
+               tipc_link_xmit(buf, peer, msg_link_selector(msg));
        }
-
        spin_lock_bh(&tipc_port_list_lock);
        list_del(&p_ptr->port_list);
        list_del(&p_ptr->wait_list);
        spin_unlock_bh(&tipc_port_list_lock);
        k_term_timer(&p_ptr->timer);
-       tipc_net_route_msg(buf);
 }
 
 /*
@@ -275,100 +158,16 @@ static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
                msg_set_destport(msg, tipc_port_peerport(p_ptr));
                msg_set_origport(msg, p_ptr->ref);
                msg_set_msgcnt(msg, ack);
+               buf->next = NULL;
        }
        return buf;
 }
 
-int tipc_reject_msg(struct sk_buff *buf, u32 err)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       struct sk_buff *rbuf;
-       struct tipc_msg *rmsg;
-       int hdr_sz;
-       u32 imp;
-       u32 data_sz = msg_data_sz(msg);
-       u32 src_node;
-       u32 rmsg_sz;
-
-       /* discard rejected message if it shouldn't be returned to sender */
-       if (WARN(!msg_isdata(msg),
-                "attempt to reject message with user=%u", msg_user(msg))) {
-               dump_stack();
-               goto exit;
-       }
-       if (msg_errcode(msg) || msg_dest_droppable(msg))
-               goto exit;
-
-       /*
-        * construct returned message by copying rejected message header and
-        * data (or subset), then updating header fields that need adjusting
-        */
-       hdr_sz = msg_hdr_sz(msg);
-       rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
-
-       rbuf = tipc_buf_acquire(rmsg_sz);
-       if (rbuf == NULL)
-               goto exit;
-
-       rmsg = buf_msg(rbuf);
-       skb_copy_to_linear_data(rbuf, msg, rmsg_sz);
-
-       if (msg_connected(rmsg)) {
-               imp = msg_importance(rmsg);
-               if (imp < TIPC_CRITICAL_IMPORTANCE)
-                       msg_set_importance(rmsg, ++imp);
-       }
-       msg_set_non_seq(rmsg, 0);
-       msg_set_size(rmsg, rmsg_sz);
-       msg_set_errcode(rmsg, err);
-       msg_set_prevnode(rmsg, tipc_own_addr);
-       msg_swap_words(rmsg, 4, 5);
-       if (!msg_short(rmsg))
-               msg_swap_words(rmsg, 6, 7);
-
-       /* send self-abort message when rejecting on a connected port */
-       if (msg_connected(msg)) {
-               struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
-
-               if (p_ptr) {
-                       struct sk_buff *abuf = NULL;
-
-                       if (p_ptr->connected)
-                               abuf = port_build_self_abort_msg(p_ptr, err);
-                       tipc_port_unlock(p_ptr);
-                       tipc_net_route_msg(abuf);
-               }
-       }
-
-       /* send returned message & dispose of rejected message */
-       src_node = msg_prevnode(msg);
-       if (in_own_node(src_node))
-               tipc_sk_rcv(rbuf);
-       else
-               tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
-exit:
-       kfree_skb(buf);
-       return data_sz;
-}
-
-int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                          struct iovec const *msg_sect, unsigned int len,
-                          int err)
-{
-       struct sk_buff *buf;
-       int res;
-
-       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (!buf)
-               return res;
-
-       return tipc_reject_msg(buf, err);
-}
-
 static void port_timeout(unsigned long ref)
 {
        struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
 
        if (!p_ptr)
                return;
@@ -379,15 +178,16 @@ static void port_timeout(unsigned long ref)
        }
 
        /* Last probe answered ? */
-       if (p_ptr->probing_state == PROBING) {
+       if (p_ptr->probing_state == TIPC_CONN_PROBING) {
                buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
        } else {
                buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
-               p_ptr->probing_state = PROBING;
+               p_ptr->probing_state = TIPC_CONN_PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg),  msg_link_selector(msg));
 }
 
 
@@ -395,12 +195,14 @@ static void port_handle_node_down(unsigned long ref)
 {
        struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
 
        if (!p_ptr)
                return;
        buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg),  msg_link_selector(msg));
 }
 
 
@@ -412,6 +214,7 @@ static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 er
                struct tipc_msg *msg = buf_msg(buf);
                msg_swap_words(msg, 4, 5);
                msg_swap_words(msg, 6, 7);
+               buf->next = NULL;
        }
        return buf;
 }
@@ -436,60 +239,11 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 er
                if (imp < TIPC_CRITICAL_IMPORTANCE)
                        msg_set_importance(msg, ++imp);
                msg_set_errcode(msg, err);
+               buf->next = NULL;
        }
        return buf;
 }
 
-void tipc_port_proto_rcv(struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       struct tipc_port *p_ptr;
-       struct sk_buff *r_buf = NULL;
-       u32 destport = msg_destport(msg);
-       int wakeable;
-
-       /* Validate connection */
-       p_ptr = tipc_port_lock(destport);
-       if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
-               r_buf = tipc_buf_acquire(BASIC_H_SIZE);
-               if (r_buf) {
-                       msg = buf_msg(r_buf);
-                       tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
-                                     BASIC_H_SIZE, msg_orignode(msg));
-                       msg_set_errcode(msg, TIPC_ERR_NO_PORT);
-                       msg_set_origport(msg, destport);
-                       msg_set_destport(msg, msg_origport(msg));
-               }
-               if (p_ptr)
-                       tipc_port_unlock(p_ptr);
-               goto exit;
-       }
-
-       /* Process protocol message sent by peer */
-       switch (msg_type(msg)) {
-       case CONN_ACK:
-               wakeable = tipc_port_congested(p_ptr) && p_ptr->congested;
-               p_ptr->acked += msg_msgcnt(msg);
-               if (!tipc_port_congested(p_ptr)) {
-                       p_ptr->congested = 0;
-                       if (wakeable)
-                               tipc_port_wakeup(p_ptr);
-               }
-               break;
-       case CONN_PROBE:
-               r_buf = port_build_proto_msg(p_ptr, CONN_PROBE_REPLY, 0);
-               break;
-       default:
-               /* CONN_PROBE_REPLY or unrecognized - no action required */
-               break;
-       }
-       p_ptr->probing_state = CONFIRMED;
-       tipc_port_unlock(p_ptr);
-exit:
-       tipc_net_route_msg(r_buf);
-       kfree_skb(buf);
-}
-
 static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
 {
        struct publication *publ;
@@ -581,16 +335,19 @@ void tipc_acknowledge(u32 ref, u32 ack)
 {
        struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
-       if (p_ptr->connected) {
-               p_ptr->conn_unacked -= ack;
+       if (p_ptr->connected)
                buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
-       }
+
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       if (!buf)
+               return;
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg),  msg_link_selector(msg));
 }
 
 int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
@@ -689,7 +446,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
        p_ptr->probing_interval = PROBING_INTERVAL;
-       p_ptr->probing_state = CONFIRMED;
+       p_ptr->probing_state = TIPC_CONN_OK;
        p_ptr->connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
 
@@ -698,7 +455,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                          (net_ev_handler)port_handle_node_down);
        res = 0;
 exit:
-       p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+       p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref);
        return res;
 }
 
@@ -741,6 +498,7 @@ int tipc_port_disconnect(u32 ref)
  */
 int tipc_port_shutdown(u32 ref)
 {
+       struct tipc_msg *msg;
        struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
@@ -750,149 +508,7 @@ int tipc_port_shutdown(u32 ref)
 
        buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg),  msg_link_selector(msg));
        return tipc_port_disconnect(ref);
 }
-
-/*
- *  tipc_port_iovec_rcv: Concatenate and deliver sectioned
- *                       message for this node.
- */
-static int tipc_port_iovec_rcv(struct tipc_port *sender,
-                              struct iovec const *msg_sect,
-                              unsigned int len)
-{
-       struct sk_buff *buf;
-       int res;
-
-       res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (likely(buf))
-               tipc_sk_rcv(buf);
-       return res;
-}
-
-/**
- * tipc_send - send message sections on connection
- */
-int tipc_send(struct tipc_port *p_ptr,
-             struct iovec const *msg_sect,
-             unsigned int len)
-{
-       u32 destnode;
-       int res;
-
-       if (!p_ptr->connected)
-               return -EINVAL;
-
-       p_ptr->congested = 1;
-       if (!tipc_port_congested(p_ptr)) {
-               destnode = tipc_port_peernode(p_ptr);
-               if (likely(!in_own_node(destnode)))
-                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                                       destnode);
-               else
-                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-
-               if (likely(res != -ELINKCONG)) {
-                       p_ptr->congested = 0;
-                       if (res > 0)
-                               p_ptr->sent++;
-                       return res;
-               }
-       }
-       if (tipc_port_unreliable(p_ptr)) {
-               p_ptr->congested = 0;
-               return len;
-       }
-       return -ELINKCONG;
-}
-
-/**
- * tipc_send2name - send message sections to port name
- */
-int tipc_send2name(struct tipc_port *p_ptr,
-                  struct tipc_name const *name,
-                  unsigned int domain,
-                  struct iovec const *msg_sect,
-                  unsigned int len)
-{
-       struct tipc_msg *msg;
-       u32 destnode = domain;
-       u32 destport;
-       int res;
-
-       if (p_ptr->connected)
-               return -EINVAL;
-
-       msg = &p_ptr->phdr;
-       msg_set_type(msg, TIPC_NAMED_MSG);
-       msg_set_hdr_sz(msg, NAMED_H_SIZE);
-       msg_set_nametype(msg, name->type);
-       msg_set_nameinst(msg, name->instance);
-       msg_set_lookup_scope(msg, tipc_addr_scope(domain));
-       destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
-       msg_set_destnode(msg, destnode);
-       msg_set_destport(msg, destport);
-
-       if (likely(destport || destnode)) {
-               if (likely(in_own_node(destnode)))
-                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-               else if (tipc_own_addr)
-                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                                       destnode);
-               else
-                       res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
-                                                    len, TIPC_ERR_NO_NODE);
-               if (likely(res != -ELINKCONG)) {
-                       if (res > 0)
-                               p_ptr->sent++;
-                       return res;
-               }
-               if (tipc_port_unreliable(p_ptr))
-                       return len;
-
-               return -ELINKCONG;
-       }
-       return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
-                                     TIPC_ERR_NO_NAME);
-}
-
-/**
- * tipc_send2port - send message sections to port identity
- */
-int tipc_send2port(struct tipc_port *p_ptr,
-                  struct tipc_portid const *dest,
-                  struct iovec const *msg_sect,
-                  unsigned int len)
-{
-       struct tipc_msg *msg;
-       int res;
-
-       if (p_ptr->connected)
-               return -EINVAL;
-
-       msg = &p_ptr->phdr;
-       msg_set_type(msg, TIPC_DIRECT_MSG);
-       msg_set_lookup_scope(msg, 0);
-       msg_set_destnode(msg, dest->node);
-       msg_set_destport(msg, dest->ref);
-       msg_set_hdr_sz(msg, BASIC_H_SIZE);
-
-       if (in_own_node(dest->node))
-               res =  tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-       else if (tipc_own_addr)
-               res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                               dest->node);
-       else
-               res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
-                                               TIPC_ERR_NO_NODE);
-       if (likely(res != -ELINKCONG)) {
-               if (res > 0)
-                       p_ptr->sent++;
-               return res;
-       }
-       if (tipc_port_unreliable(p_ptr))
-               return len;
-
-       return -ELINKCONG;
-}
index cf4ca5b1d9a48ae7752f9f476cad079e3f115da8..3f93454592b6dfdfc305e0c233c087d19d9985ab 100644 (file)
  * @connected: non-zero if port is currently connected to a peer port
  * @conn_type: TIPC type used when connection was established
  * @conn_instance: TIPC instance used when connection was established
- * @conn_unacked: number of unacknowledged messages received from peer port
  * @published: non-zero if port has one or more associated names
- * @congested: non-zero if cannot send because of link or port congestion
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
  * @ref: unique reference to port in TIPC object registry
  * @phdr: preformatted message header used when sending messages
  * @port_list: adjacent ports in TIPC's global list of ports
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
- * @sent: # of non-empty messages sent by port
- * @acked: # of non-empty message acknowledgements from connected port's peer
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
@@ -76,17 +72,13 @@ struct tipc_port {
        int connected;
        u32 conn_type;
        u32 conn_instance;
-       u32 conn_unacked;
        int published;
-       u32 congested;
        u32 max_pkt;
        u32 ref;
        struct tipc_msg phdr;
        struct list_head port_list;
        struct list_head wait_list;
        u32 waiting_pkts;
-       u32 sent;
-       u32 acked;
        struct list_head publications;
        u32 pub_count;
        u32 probing_state;
@@ -104,8 +96,6 @@ struct tipc_port_list;
 u32 tipc_port_init(struct tipc_port *p_ptr,
                   const unsigned int importance);
 
-int tipc_reject_msg(struct sk_buff *buf, u32 err);
-
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
 void tipc_port_destroy(struct tipc_port *p_ptr);
@@ -122,8 +112,6 @@ int tipc_port_disconnect(u32 portref);
 
 int tipc_port_shutdown(u32 ref);
 
-void tipc_port_wakeup(struct tipc_port *port);
-
 /*
  * The following routines require that the port be locked on entry
  */
@@ -132,39 +120,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                   struct tipc_portid const *peer);
 int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
-/*
- * TIPC messaging routines
- */
-
-int tipc_send(struct tipc_port *port,
-             struct iovec const *msg_sect,
-             unsigned int len);
-
-int tipc_send2name(struct tipc_port *port,
-                  struct tipc_name const *name,
-                  u32 domain,
-                  struct iovec const *msg_sect,
-                  unsigned int len);
-
-int tipc_send2port(struct tipc_port *port,
-                  struct tipc_portid const *dest,
-                  struct iovec const *msg_sect,
-                  unsigned int len);
-
-int tipc_port_mcast_xmit(struct tipc_port *port,
-                        struct tipc_name_seq const *seq,
-                        struct iovec const *msg,
-                        unsigned int len);
-
-int tipc_port_iovec_reject(struct tipc_port *p_ptr,
-                          struct tipc_msg *hdr,
-                          struct iovec const *msg_sect,
-                          unsigned int len,
-                          int err);
-
 struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_proto_rcv(struct sk_buff *buf);
-void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
 void tipc_port_reinit(void);
 
 /**
@@ -185,12 +141,6 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
        spin_unlock_bh(p_ptr->lock);
 }
 
-static inline int tipc_port_congested(struct tipc_port *p_ptr)
-{
-       return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
-}
-
-
 static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
 {
        return msg_destnode(&p_ptr->phdr);
index ef0475568f9e39cfa3aa24d1d4d7a7132ea059a4..8477d08a6aa0fd3cc55b22a3456d9126ea58d4b2 100644 (file)
 
 #include "core.h"
 #include "port.h"
+#include "name_table.h"
 #include "node.h"
-
+#include "link.h"
 #include <linux/export.h>
+#include "link.h"
 
 #define SS_LISTENING   -1      /* socket is listening */
 #define SS_READY       -2      /* socket is connectionless */
 
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
+#define TIPC_FWD_MSG           1
 
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -123,9 +127,12 @@ static void advance_rx_queue(struct sock *sk)
 static void reject_rx_queue(struct sock *sk)
 {
        struct sk_buff *buf;
+       u32 dnode;
 
-       while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
-               tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+       while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
+               if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
+                       tipc_link_xmit(buf, dnode, 0);
+       }
 }
 
 /**
@@ -201,6 +208,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       tsk->sent_unacked = 0;
        atomic_set(&tsk->dupl_rcvcnt, 0);
        tipc_port_unlock(port);
 
@@ -303,6 +311,7 @@ static int tipc_release(struct socket *sock)
        struct tipc_sock *tsk;
        struct tipc_port *port;
        struct sk_buff *buf;
+       u32 dnode;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -331,7 +340,8 @@ static int tipc_release(struct socket *sock)
                                sock->state = SS_DISCONNECTING;
                                tipc_port_disconnect(port->ref);
                        }
-                       tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+                       if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
+                               tipc_link_xmit(buf, dnode, 0);
                }
        }
 
@@ -504,12 +514,12 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
        switch ((int)sock->state) {
        case SS_UNCONNECTED:
-               if (!tsk->port.congested)
+               if (!tsk->link_cong)
                        mask |= POLLOUT;
                break;
        case SS_READY:
        case SS_CONNECTED:
-               if (!tsk->port.congested)
+               if (!tsk->link_cong && !tipc_sk_conn_cong(tsk))
                        mask |= POLLOUT;
                /* fall thru' */
        case SS_CONNECTING:
@@ -525,6 +535,136 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
+/**
+ * tipc_sendmcast - send multicast message
+ * @sock: socket structure
+ * @seq: destination address
+ * @iov: message data to send
+ * @dsz: total length of message data
+ * @timeo: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Returns the number of bytes sent on success, or errno
+ */
+static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
+                         struct iovec *iov, size_t dsz, long timeo)
+{
+       struct sock *sk = sock->sk;
+       struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr;
+       struct sk_buff *buf;
+       uint mtu;
+       int rc;
+
+       msg_set_type(mhdr, TIPC_MCAST_MSG);
+       msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
+       msg_set_destport(mhdr, 0);
+       msg_set_destnode(mhdr, 0);
+       msg_set_nametype(mhdr, seq->type);
+       msg_set_namelower(mhdr, seq->lower);
+       msg_set_nameupper(mhdr, seq->upper);
+       msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
+
+new_mtu:
+       mtu = tipc_bclink_get_mtu();
+       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       if (unlikely(rc < 0))
+               return rc;
+
+       do {
+               rc = tipc_bclink_xmit(buf);
+               if (likely(rc >= 0)) {
+                       rc = dsz;
+                       break;
+               }
+               if (rc == -EMSGSIZE)
+                       goto new_mtu;
+               if (rc != -ELINKCONG)
+                       break;
+               rc = tipc_wait_for_sndmsg(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
+       return rc;
+}
+
+/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
+ */
+void tipc_sk_mcast_rcv(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_port_list dports = {0, NULL, };
+       struct tipc_port_list *item;
+       struct sk_buff *b;
+       uint i, last, dst = 0;
+       u32 scope = TIPC_CLUSTER_SCOPE;
+
+       if (in_own_node(msg_orignode(msg)))
+               scope = TIPC_NODE_SCOPE;
+
+       /* Create destination port list: */
+       tipc_nametbl_mc_translate(msg_nametype(msg),
+                                 msg_namelower(msg),
+                                 msg_nameupper(msg),
+                                 scope,
+                                 &dports);
+       last = dports.count;
+       if (!last) {
+               kfree_skb(buf);
+               return;
+       }
+
+       for (item = &dports; item; item = item->next) {
+               for (i = 0; i < PLSIZE && ++dst <= last; i++) {
+                       b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
+                       if (!b) {
+                               pr_warn("Failed do clone mcast rcv buffer\n");
+                               continue;
+                       }
+                       msg_set_destport(msg, item->ports[i]);
+                       tipc_sk_rcv(b);
+               }
+       }
+       tipc_port_list_free(&dports);
+}
+
+/**
+ * tipc_sk_proto_rcv - receive a connection mng protocol message
+ * @tsk: receiving socket
+ * @dnode: node to send response message to, if any
+ * @buf: buffer containing protocol message
+ * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
+ * (CONN_PROBE_REPLY) message should be forwarded.
+ */
+static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
+                            struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_port *port = &tsk->port;
+       int conn_cong;
+
+       /* Ignore if connection cannot be validated: */
+       if (!port->connected || !tipc_port_peer_msg(port, msg))
+               goto exit;
+
+       port->probing_state = TIPC_CONN_OK;
+
+       if (msg_type(msg) == CONN_ACK) {
+               conn_cong = tipc_sk_conn_cong(tsk);
+               tsk->sent_unacked -= msg_msgcnt(msg);
+               if (conn_cong)
+                       tipc_sock_wakeup(tsk);
+       } else if (msg_type(msg) == CONN_PROBE) {
+               if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
+                       return TIPC_OK;
+               msg_set_type(msg, CONN_PROBE_REPLY);
+               return TIPC_FWD_MSG;
+       }
+       /* Do nothing if msg_type() == CONN_PROBE_REPLY */
+exit:
+       kfree_skb(buf);
+       return TIPC_OK;
+}
+
 /**
  * dest_name_check - verify user is permitted to send to specified port name
  * @dest: destination address
@@ -539,6 +679,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
 {
        struct tipc_cfg_msg_hdr hdr;
 
+       if (unlikely(dest->addrtype == TIPC_ADDR_ID))
+               return 0;
        if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
                return 0;
        if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
@@ -575,19 +717,18 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
                        return sock_intr_errno(*timeo_p);
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               done = sk_wait_event(sk, timeo_p, !tsk->port.congested);
+               done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
-
 /**
  * tipc_sendmsg - send message in connectionless manner
  * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
- * @total_len: length of message
+ * @dsz: amount of user data to be sent
  *
  * Message must have an destination specified explicitly.
  * Used for SOCK_RDM and SOCK_DGRAM messages,
@@ -597,100 +738,123 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
  * Returns the number of bytes sent on success, or errno otherwise
  */
 static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *m, size_t total_len)
+                       struct msghdr *m, size_t dsz)
 {
+       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_port *port = &tsk->port;
-       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       int needs_conn;
+       struct tipc_msg *mhdr = &port->phdr;
+       struct iovec *iov = m->msg_iov;
+       u32 dnode, dport;
+       struct sk_buff *buf;
+       struct tipc_name_seq *seq = &dest->addr.nameseq;
+       u32 mtu;
        long timeo;
-       int res = -EINVAL;
+       int rc = -EINVAL;
 
        if (unlikely(!dest))
                return -EDESTADDRREQ;
+
        if (unlikely((m->msg_namelen < sizeof(*dest)) ||
                     (dest->family != AF_TIPC)))
                return -EINVAL;
-       if (total_len > TIPC_MAX_USER_MSG_SIZE)
+
+       if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
        if (iocb)
                lock_sock(sk);
 
-       needs_conn = (sock->state != SS_READY);
-       if (unlikely(needs_conn)) {
+       if (unlikely(sock->state != SS_READY)) {
                if (sock->state == SS_LISTENING) {
-                       res = -EPIPE;
+                       rc = -EPIPE;
                        goto exit;
                }
                if (sock->state != SS_UNCONNECTED) {
-                       res = -EISCONN;
+                       rc = -EISCONN;
                        goto exit;
                }
                if (tsk->port.published) {
-                       res = -EOPNOTSUPP;
+                       rc = -EOPNOTSUPP;
                        goto exit;
                }
                if (dest->addrtype == TIPC_ADDR_NAME) {
                        tsk->port.conn_type = dest->addr.name.name.type;
                        tsk->port.conn_instance = dest->addr.name.name.instance;
                }
-
-               /* Abort any pending connection attempts (very unlikely) */
-               reject_rx_queue(sk);
        }
+       rc = dest_name_check(dest, m);
+       if (rc)
+               goto exit;
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
-       do {
-               if (dest->addrtype == TIPC_ADDR_NAME) {
-                       res = dest_name_check(dest, m);
-                       if (res)
-                               break;
-                       res = tipc_send2name(port,
-                                            &dest->addr.name.name,
-                                            dest->addr.name.domain,
-                                            m->msg_iov,
-                                            total_len);
-               } else if (dest->addrtype == TIPC_ADDR_ID) {
-                       res = tipc_send2port(port,
-                                            &dest->addr.id,
-                                            m->msg_iov,
-                                            total_len);
-               } else if (dest->addrtype == TIPC_ADDR_MCAST) {
-                       if (needs_conn) {
-                               res = -EOPNOTSUPP;
-                               break;
-                       }
-                       res = dest_name_check(dest, m);
-                       if (res)
-                               break;
-                       res = tipc_port_mcast_xmit(port,
-                                                  &dest->addr.nameseq,
-                                                  m->msg_iov,
-                                                  total_len);
+
+       if (dest->addrtype == TIPC_ADDR_MCAST) {
+               rc = tipc_sendmcast(sock, seq, iov, dsz, timeo);
+               goto exit;
+       } else if (dest->addrtype == TIPC_ADDR_NAME) {
+               u32 type = dest->addr.name.name.type;
+               u32 inst = dest->addr.name.name.instance;
+               u32 domain = dest->addr.name.domain;
+
+               dnode = domain;
+               msg_set_type(mhdr, TIPC_NAMED_MSG);
+               msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
+               msg_set_nametype(mhdr, type);
+               msg_set_nameinst(mhdr, inst);
+               msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
+               dport = tipc_nametbl_translate(type, inst, &dnode);
+               msg_set_destnode(mhdr, dnode);
+               msg_set_destport(mhdr, dport);
+               if (unlikely(!dport && !dnode)) {
+                       rc = -EHOSTUNREACH;
+                       goto exit;
                }
-               if (likely(res != -ELINKCONG)) {
-                       if (needs_conn && (res >= 0))
+       } else if (dest->addrtype == TIPC_ADDR_ID) {
+               dnode = dest->addr.id.node;
+               msg_set_type(mhdr, TIPC_DIRECT_MSG);
+               msg_set_lookup_scope(mhdr, 0);
+               msg_set_destnode(mhdr, dnode);
+               msg_set_destport(mhdr, dest->addr.id.ref);
+               msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
+       }
+
+new_mtu:
+       mtu = tipc_node_get_mtu(dnode, tsk->port.ref);
+       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       if (rc < 0)
+               goto exit;
+
+       do {
+               rc = tipc_link_xmit(buf, dnode, tsk->port.ref);
+               if (likely(rc >= 0)) {
+                       if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
+                       rc = dsz;
                        break;
                }
-               res = tipc_wait_for_sndmsg(sock, &timeo);
-               if (res)
+               if (rc == -EMSGSIZE)
+                       goto new_mtu;
+
+               if (rc != -ELINKCONG)
                        break;
-       } while (1);
 
+               rc = tipc_wait_for_sndmsg(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
 exit:
        if (iocb)
                release_sock(sk);
-       return res;
+
+       return rc;
 }
 
 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_port *port = &tsk->port;
        DEFINE_WAIT(wait);
        int done;
 
@@ -709,37 +873,49 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                done = sk_wait_event(sk, timeo_p,
-                                    (!port->congested || !port->connected));
+                                    (!tsk->link_cong &&
+                                     !tipc_sk_conn_cong(tsk)) ||
+                                    !tsk->port.connected);
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
 /**
- * tipc_send_packet - send a connection-oriented message
- * @iocb: if NULL, indicates that socket lock is already held
+ * tipc_send_stream - send stream-oriented data
+ * @iocb: (unused)
  * @sock: socket structure
- * @m: message to send
- * @total_len: length of message
+ * @m: data to send
+ * @dsz: total length of data to be transmitted
  *
- * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
+ * Used for SOCK_STREAM data.
  *
- * Returns the number of bytes sent on success, or errno otherwise
+ * Returns the number of bytes sent on success (or partial success),
+ * or errno if no data sent
  */
-static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
+       struct tipc_msg *mhdr = &port->phdr;
+       struct sk_buff *buf;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       int res = -EINVAL;
+       u32 ref = port->ref;
+       int rc = -EINVAL;
        long timeo;
+       u32 dnode;
+       uint mtu, send, sent = 0;
 
        /* Handle implied connection establishment */
-       if (unlikely(dest))
-               return tipc_sendmsg(iocb, sock, m, total_len);
-
-       if (total_len > TIPC_MAX_USER_MSG_SIZE)
+       if (unlikely(dest)) {
+               rc = tipc_sendmsg(iocb, sock, m, dsz);
+               if (dsz && (dsz == rc))
+                       tsk->sent_unacked = 1;
+               return rc;
+       }
+       if (dsz > (uint)INT_MAX)
                return -EMSGSIZE;
 
        if (iocb)
@@ -747,123 +923,66 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
 
        if (unlikely(sock->state != SS_CONNECTED)) {
                if (sock->state == SS_DISCONNECTING)
-                       res = -EPIPE;
+                       rc = -EPIPE;
                else
-                       res = -ENOTCONN;
+                       rc = -ENOTCONN;
                goto exit;
        }
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+       dnode = tipc_port_peernode(port);
+
+next:
+       mtu = port->max_pkt;
+       send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
+       rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
+       if (unlikely(rc < 0))
+               goto exit;
        do {
-               res = tipc_send(&tsk->port, m->msg_iov, total_len);
-               if (likely(res != -ELINKCONG))
-                       break;
-               res = tipc_wait_for_sndpkt(sock, &timeo);
-               if (res)
-                       break;
-       } while (1);
+               if (likely(!tipc_sk_conn_cong(tsk))) {
+                       rc = tipc_link_xmit(buf, dnode, ref);
+                       if (likely(!rc)) {
+                               tsk->sent_unacked++;
+                               sent += send;
+                               if (sent == dsz)
+                                       break;
+                               goto next;
+                       }
+                       if (rc == -EMSGSIZE) {
+                               port->max_pkt = tipc_node_get_mtu(dnode, ref);
+                               goto next;
+                       }
+                       if (rc != -ELINKCONG)
+                               break;
+               }
+               rc = tipc_wait_for_sndpkt(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
 exit:
        if (iocb)
                release_sock(sk);
-       return res;
+       return sent ? sent : rc;
 }
 
 /**
- * tipc_send_stream - send stream-oriented data
- * @iocb: (unused)
+ * tipc_send_packet - send a connection-oriented message
+ * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
- * @m: data to send
- * @total_len: total length of data to be sent
+ * @m: message to send
+ * @dsz: length of data to be transmitted
  *
- * Used for SOCK_STREAM data.
+ * Used for SOCK_SEQPACKET messages.
  *
- * Returns the number of bytes sent on success (or partial success),
- * or errno if no data sent
+ * Returns the number of bytes sent on success, or errno otherwise
  */
-static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t dsz)
 {
-       struct sock *sk = sock->sk;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       struct msghdr my_msg;
-       struct iovec my_iov;
-       struct iovec *curr_iov;
-       int curr_iovlen;
-       char __user *curr_start;
-       u32 hdr_size;
-       int curr_left;
-       int bytes_to_send;
-       int bytes_sent;
-       int res;
-
-       lock_sock(sk);
-
-       /* Handle special cases where there is no connection */
-       if (unlikely(sock->state != SS_CONNECTED)) {
-               if (sock->state == SS_UNCONNECTED)
-                       res = tipc_send_packet(NULL, sock, m, total_len);
-               else
-                       res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
-               goto exit;
-       }
-
-       if (unlikely(m->msg_name)) {
-               res = -EISCONN;
-               goto exit;
-       }
-
-       if (total_len > (unsigned int)INT_MAX) {
-               res = -EMSGSIZE;
-               goto exit;
-       }
-
-       /*
-        * Send each iovec entry using one or more messages
-        *
-        * Note: This algorithm is good for the most likely case
-        * (i.e. one large iovec entry), but could be improved to pass sets
-        * of small iovec entries into send_packet().
-        */
-       curr_iov = m->msg_iov;
-       curr_iovlen = m->msg_iovlen;
-       my_msg.msg_iov = &my_iov;
-       my_msg.msg_iovlen = 1;
-       my_msg.msg_flags = m->msg_flags;
-       my_msg.msg_name = NULL;
-       bytes_sent = 0;
-
-       hdr_size = msg_hdr_sz(&tsk->port.phdr);
-
-       while (curr_iovlen--) {
-               curr_start = curr_iov->iov_base;
-               curr_left = curr_iov->iov_len;
-
-               while (curr_left) {
-                       bytes_to_send = tsk->port.max_pkt - hdr_size;
-                       if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
-                               bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
-                       if (curr_left < bytes_to_send)
-                               bytes_to_send = curr_left;
-                       my_iov.iov_base = curr_start;
-                       my_iov.iov_len = bytes_to_send;
-                       res = tipc_send_packet(NULL, sock, &my_msg,
-                                              bytes_to_send);
-                       if (res < 0) {
-                               if (bytes_sent)
-                                       res = bytes_sent;
-                               goto exit;
-                       }
-                       curr_left -= bytes_to_send;
-                       curr_start += bytes_to_send;
-                       bytes_sent += bytes_to_send;
-               }
+       if (dsz > TIPC_MAX_USER_MSG_SIZE)
+               return -EMSGSIZE;
 
-               curr_iov++;
-       }
-       res = bytes_sent;
-exit:
-       release_sock(sk);
-       return res;
+       return tipc_send_stream(iocb, sock, m, dsz);
 }
 
 /**
@@ -1104,8 +1223,10 @@ restart:
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
                if ((sock->state != SS_READY) &&
-                   (++port->conn_unacked >= TIPC_CONNACK_INTV))
-                       tipc_acknowledge(port->ref, port->conn_unacked);
+                   (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
+                       tipc_acknowledge(port->ref, tsk->rcv_unacked);
+                       tsk->rcv_unacked = 0;
+               }
                advance_rx_queue(sk);
        }
 exit:
@@ -1213,8 +1334,10 @@ restart:
 
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
-                       tipc_acknowledge(port->ref, port->conn_unacked);
+               if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
+                       tipc_acknowledge(port->ref, tsk->rcv_unacked);
+                       tsk->rcv_unacked = 0;
+               }
                advance_rx_queue(sk);
        }
 
@@ -1269,17 +1392,16 @@ static void tipc_data_ready(struct sock *sk)
  * @tsk: TIPC socket
  * @msg: message
  *
- * Returns TIPC error status code and socket error status code
- * once it encounters some errors
+ * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise
  */
-static u32 filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
+static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
 {
        struct sock *sk = &tsk->sk;
        struct tipc_port *port = &tsk->port;
        struct socket *sock = sk->sk_socket;
        struct tipc_msg *msg = buf_msg(*buf);
 
-       u32 retval = TIPC_ERR_NO_PORT;
+       int retval = -TIPC_ERR_NO_PORT;
        int res;
 
        if (msg_mcast(msg))
@@ -1382,32 +1504,37 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
  *
  * Called with socket lock already taken; port lock may also be taken.
  *
- * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
+ * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
  */
-static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
+static int filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *msg = buf_msg(buf);
        unsigned int limit = rcvbuf_limit(sk, buf);
-       u32 res = TIPC_OK;
+       u32 onode;
+       int rc = TIPC_OK;
+
+       if (unlikely(msg_user(msg) == CONN_MANAGER))
+               return tipc_sk_proto_rcv(tsk, &onode, buf);
 
        /* Reject message if it is wrong sort of message for socket */
        if (msg_type(msg) > TIPC_DIRECT_MSG)
-               return TIPC_ERR_NO_PORT;
+               return -TIPC_ERR_NO_PORT;
 
        if (sock->state == SS_READY) {
                if (msg_connected(msg))
-                       return TIPC_ERR_NO_PORT;
+                       return -TIPC_ERR_NO_PORT;
        } else {
-               res = filter_connect(tsk, &buf);
-               if (res != TIPC_OK || buf == NULL)
-                       return res;
+               rc = filter_connect(tsk, &buf);
+               if (rc != TIPC_OK || buf == NULL)
+                       return rc;
        }
 
        /* Reject message if there isn't room to queue it */
        if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
-               return TIPC_ERR_OVERLOAD;
+               return -TIPC_ERR_OVERLOAD;
 
        /* Enqueue message */
        TIPC_SKB_CB(buf)->handle = NULL;
@@ -1429,16 +1556,23 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
 {
-       u32 res;
+       int rc;
+       u32 onode;
        struct tipc_sock *tsk = tipc_sk(sk);
        uint truesize = buf->truesize;
 
-       res = filter_rcv(sk, buf);
-       if (unlikely(res))
-               tipc_reject_msg(buf, res);
+       rc = filter_rcv(sk, buf);
 
-       if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-               atomic_add(truesize, &tsk->dupl_rcvcnt);
+       if (likely(!rc)) {
+               if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+                       atomic_add(truesize, &tsk->dupl_rcvcnt);
+               return 0;
+       }
+
+       if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
+               return 0;
+
+       tipc_link_xmit(buf, onode, 0);
 
        return 0;
 }
@@ -1455,19 +1589,14 @@ int tipc_sk_rcv(struct sk_buff *buf)
        struct tipc_port *port;
        struct sock *sk;
        u32 dport = msg_destport(buf_msg(buf));
-       int err = TIPC_OK;
+       int rc = TIPC_OK;
        uint limit;
+       u32 dnode;
 
-       /* Forward unresolved named message */
-       if (unlikely(!dport)) {
-               tipc_net_route_msg(buf);
-               return 0;
-       }
-
-       /* Validate destination */
+       /* Validate destination and message */
        port = tipc_port_lock(dport);
        if (unlikely(!port)) {
-               err = TIPC_ERR_NO_PORT;
+               rc = tipc_msg_eval(buf, &dnode);
                goto exit;
        }
 
@@ -1478,23 +1607,25 @@ int tipc_sk_rcv(struct sk_buff *buf)
        bh_lock_sock(sk);
 
        if (!sock_owned_by_user(sk)) {
-               err = filter_rcv(sk, buf);
+               rc = filter_rcv(sk, buf);
        } else {
                if (sk->sk_backlog.len == 0)
                        atomic_set(&tsk->dupl_rcvcnt, 0);
                limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
                if (sk_add_backlog(sk, buf, limit))
-                       err = TIPC_ERR_OVERLOAD;
+                       rc = -TIPC_ERR_OVERLOAD;
        }
-
        bh_unlock_sock(sk);
        tipc_port_unlock(port);
 
-       if (likely(!err))
+       if (likely(!rc))
                return 0;
 exit:
-       tipc_reject_msg(buf, err);
-       return -EHOSTUNREACH;
+       if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
+               return -EHOSTUNREACH;
+
+       tipc_link_xmit(buf, dnode, 0);
+       return (rc < 0) ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1758,6 +1889,7 @@ static int tipc_shutdown(struct socket *sock, int how)
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_port *port = &tsk->port;
        struct sk_buff *buf;
+       u32 peer;
        int res;
 
        if (how != SHUT_RDWR)
@@ -1778,7 +1910,8 @@ restart:
                                goto restart;
                        }
                        tipc_port_disconnect(port->ref);
-                       tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
+                       if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN))
+                               tipc_link_xmit(buf, peer, 0);
                } else {
                        tipc_port_shutdown(port->ref);
                }
@@ -1936,7 +2069,7 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
-int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
 {
        struct tipc_sioc_ln_req lnr;
        void __user *argp = (void __user *)arg;
@@ -1952,7 +2085,6 @@ int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
                        return 0;
                }
                return -EADDRNOTAVAIL;
-               break;
        default:
                return -ENOIOCTLCMD;
        }
index 3afcd2a70b313c21d67752606b839e613d3cf9df..43b75b3cecedb9b3bee4fd6a9a42a8f5a4cbcf0b 100644 (file)
@@ -38,6 +38,9 @@
 #include "port.h"
 #include <net/sock.h>
 
+#define TIPC_CONN_OK      0
+#define TIPC_CONN_PROBING 1
+
 /**
  * struct tipc_sock - TIPC socket structure
  * @sk: socket - interacts with 'port' and with user via the socket API
@@ -45,6 +48,9 @@
  * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
+ * @link_cong: non-zero if owner must sleep because of link congestion
+ * @sent_unacked: # messages sent by socket, and not yet acked by peer
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
  */
 
 struct tipc_sock {
@@ -52,6 +58,9 @@ struct tipc_sock {
        struct tipc_port port;
        unsigned int conn_timeout;
        atomic_t dupl_rcvcnt;
+       int link_cong;
+       uint sent_unacked;
+       uint rcv_unacked;
 };
 
 static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -69,6 +78,13 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
        tsk->sk.sk_write_space(&tsk->sk);
 }
 
+static inline int tipc_sk_conn_cong(struct tipc_sock *tsk)
+{
+       return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
+}
+
 int tipc_sk_rcv(struct sk_buff *buf);
 
+void tipc_sk_mcast_rcv(struct sk_buff *buf);
+
 #endif
index a1c40654dd9b1ca9b47bbaef6682ee6d49147e66..afee5e0455ea460b9b3d2e39c27e7eb0d881fa9f 100644 (file)
@@ -25,7 +25,6 @@
 #include "sysfs.h"
 #include "debugfs.h"
 #include "wext-compat.h"
-#include "ethtool.h"
 #include "rdev-ops.h"
 
 /* name for sysfs, %d is appended */
@@ -927,8 +926,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                /* allow mac80211 to determine the timeout */
                wdev->ps_timeout = -1;
 
-               netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
-
                if ((wdev->iftype == NL80211_IFTYPE_STATION ||
                     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
                     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
index d4860bfc020e5a1c43758e8cca6e9508908344be..e9e91298c70de7bdab61b31b795df213c5a71461 100644 (file)
@@ -1,11 +1,9 @@
 #include <linux/utsname.h>
 #include <net/cfg80211.h>
 #include "core.h"
-#include "ethtool.h"
 #include "rdev-ops.h"
 
-static void cfg80211_get_drvinfo(struct net_device *dev,
-                                       struct ethtool_drvinfo *info)
+void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
@@ -23,84 +21,4 @@ static void cfg80211_get_drvinfo(struct net_device *dev,
        strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
                sizeof(info->bus_info));
 }
-
-static int cfg80211_get_regs_len(struct net_device *dev)
-{
-       /* For now, return 0... */
-       return 0;
-}
-
-static void cfg80211_get_regs(struct net_device *dev, struct ethtool_regs *regs,
-                       void *data)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-
-       regs->version = wdev->wiphy->hw_version;
-       regs->len = 0;
-}
-
-static void cfg80211_get_ringparam(struct net_device *dev,
-                                  struct ethtool_ringparam *rp)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-
-       memset(rp, 0, sizeof(*rp));
-
-       if (rdev->ops->get_ringparam)
-               rdev_get_ringparam(rdev, &rp->tx_pending, &rp->tx_max_pending,
-                                  &rp->rx_pending, &rp->rx_max_pending);
-}
-
-static int cfg80211_set_ringparam(struct net_device *dev,
-                                 struct ethtool_ringparam *rp)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-
-       if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
-               return -EINVAL;
-
-       if (rdev->ops->set_ringparam)
-               return rdev_set_ringparam(rdev, rp->tx_pending, rp->rx_pending);
-
-       return -ENOTSUPP;
-}
-
-static int cfg80211_get_sset_count(struct net_device *dev, int sset)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       if (rdev->ops->get_et_sset_count)
-               return rdev_get_et_sset_count(rdev, dev, sset);
-       return -EOPNOTSUPP;
-}
-
-static void cfg80211_get_stats(struct net_device *dev,
-                              struct ethtool_stats *stats, u64 *data)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       if (rdev->ops->get_et_stats)
-               rdev_get_et_stats(rdev, dev, stats, data);
-}
-
-static void cfg80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       if (rdev->ops->get_et_strings)
-               rdev_get_et_strings(rdev, dev, sset, data);
-}
-
-const struct ethtool_ops cfg80211_ethtool_ops = {
-       .get_drvinfo = cfg80211_get_drvinfo,
-       .get_regs_len = cfg80211_get_regs_len,
-       .get_regs = cfg80211_get_regs,
-       .get_link = ethtool_op_get_link,
-       .get_ringparam = cfg80211_get_ringparam,
-       .set_ringparam = cfg80211_set_ringparam,
-       .get_strings = cfg80211_get_strings,
-       .get_ethtool_stats = cfg80211_get_stats,
-       .get_sset_count = cfg80211_get_sset_count,
-};
+EXPORT_SYMBOL(cfg80211_get_drvinfo);
diff --git a/net/wireless/ethtool.h b/net/wireless/ethtool.h
deleted file mode 100644 (file)
index 695ecad..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __CFG80211_ETHTOOL__
-#define __CFG80211_ETHTOOL__
-
-extern const struct ethtool_ops cfg80211_ethtool_ops;
-
-#endif /* __CFG80211_ETHTOOL__ */
index 6668daf6932667bee1f80f6d4c7bdcefef36346c..082f5c62b8cf3c280414bb65b703ca52af13cb1b 100644 (file)
@@ -337,6 +337,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
        [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
        [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
+       [NL80211_ATTR_TDLS_INITIATOR] = { .type = NLA_FLAG },
        [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG },
        [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY,
                                      .len = IEEE80211_MAX_DATA_LEN },
@@ -6011,17 +6012,6 @@ skip_beacons:
                params.radar_required = true;
        }
 
-       /* TODO: I left this here for now.  With channel switch, the
-        * verification is a bit more complicated, because we only do
-        * it later when the channel switch really happens.
-        */
-       err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
-                                          params.chandef.chan,
-                                          CHAN_MODE_SHARED,
-                                          radar_detect_width);
-       if (err)
-               return err;
-
        if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
                params.block_tx = true;
 
@@ -7364,6 +7354,7 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
        u32 peer_capability = 0;
        u16 status_code;
        u8 *peer;
+       bool initiator;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
            !rdev->ops->tdls_mgmt)
@@ -7380,12 +7371,14 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
        action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
        status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
        dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+       initiator = nla_get_flag(info->attrs[NL80211_ATTR_TDLS_INITIATOR]);
        if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY])
                peer_capability =
                        nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]);
 
        return rdev_tdls_mgmt(rdev, dev, peer, action_code,
                              dialog_token, status_code, peer_capability,
+                             initiator,
                              nla_data(info->attrs[NL80211_ATTR_IE]),
                              nla_len(info->attrs[NL80211_ATTR_IE]));
 }
index d95bbe34813833d04ed618fcd6ca85700718216b..56c2240c30cefcc5a386b4da67a0e49686e47e4a 100644 (file)
@@ -714,25 +714,6 @@ static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
-static inline int rdev_set_ringparam(struct cfg80211_registered_device *rdev,
-                                    u32 tx, u32 rx)
-{
-       int ret;
-       trace_rdev_set_ringparam(&rdev->wiphy, tx, rx);
-       ret = rdev->ops->set_ringparam(&rdev->wiphy, tx, rx);
-       trace_rdev_return_int(&rdev->wiphy, ret);
-       return ret;
-}
-
-static inline void rdev_get_ringparam(struct cfg80211_registered_device *rdev,
-                                     u32 *tx, u32 *tx_max, u32 *rx,
-                                     u32 *rx_max)
-{
-       trace_rdev_get_ringparam(&rdev->wiphy);
-       rdev->ops->get_ringparam(&rdev->wiphy, tx, tx_max, rx, rx_max);
-       trace_rdev_return_void_tx_rx(&rdev->wiphy, *tx, *tx_max, *rx, *rx_max);
-}
-
 static inline int
 rdev_sched_scan_start(struct cfg80211_registered_device *rdev,
                      struct net_device *dev,
@@ -770,15 +751,15 @@ static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev,
                                 struct net_device *dev, u8 *peer,
                                 u8 action_code, u8 dialog_token,
                                 u16 status_code, u32 peer_capability,
-                                const u8 *buf, size_t len)
+                                bool initiator, const u8 *buf, size_t len)
 {
        int ret;
        trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
                             dialog_token, status_code, peer_capability,
-                            buf, len);
+                            initiator, buf, len);
        ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
                                   dialog_token, status_code, peer_capability,
-                                  buf, len);
+                                  initiator, buf, len);
        trace_rdev_return_int(&rdev->wiphy, ret);
        return ret;
 }
@@ -815,35 +796,6 @@ static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev,
        return ret;
 }
 
-static inline int
-rdev_get_et_sset_count(struct cfg80211_registered_device *rdev,
-                      struct net_device *dev, int sset)
-{
-       int ret;
-       trace_rdev_get_et_sset_count(&rdev->wiphy, dev, sset);
-       ret = rdev->ops->get_et_sset_count(&rdev->wiphy, dev, sset);
-       trace_rdev_return_int(&rdev->wiphy, ret);
-       return ret;
-}
-
-static inline void rdev_get_et_stats(struct cfg80211_registered_device *rdev,
-                                    struct net_device *dev,
-                                    struct ethtool_stats *stats, u64 *data)
-{
-       trace_rdev_get_et_stats(&rdev->wiphy, dev);
-       rdev->ops->get_et_stats(&rdev->wiphy, dev, stats, data);
-       trace_rdev_return_void(&rdev->wiphy);
-}
-
-static inline void rdev_get_et_strings(struct cfg80211_registered_device *rdev,
-                                      struct net_device *dev, u32 sset,
-                                      u8 *data)
-{
-       trace_rdev_get_et_strings(&rdev->wiphy, dev, sset);
-       rdev->ops->get_et_strings(&rdev->wiphy, dev, sset, data);
-       trace_rdev_return_void(&rdev->wiphy);
-}
-
 static inline int
 rdev_get_channel(struct cfg80211_registered_device *rdev,
                 struct wireless_dev *wdev,
index 560ed77084e92b52cae0f299ca383eef240a42e6..85474ee501ebbe95599f8ca3f14ba037be690c74 100644 (file)
@@ -298,11 +298,6 @@ DEFINE_EVENT(wiphy_only_evt, rdev_return_void,
        TP_ARGS(wiphy)
 );
 
-DEFINE_EVENT(wiphy_only_evt, rdev_get_ringparam,
-       TP_PROTO(struct wiphy *wiphy),
-       TP_ARGS(wiphy)
-);
-
 DEFINE_EVENT(wiphy_only_evt, rdev_get_antenna,
        TP_PROTO(struct wiphy *wiphy),
        TP_ARGS(wiphy)
@@ -580,11 +575,6 @@ DEFINE_EVENT(wiphy_netdev_evt, rdev_stop_ap,
        TP_ARGS(wiphy, netdev)
 );
 
-DEFINE_EVENT(wiphy_netdev_evt, rdev_get_et_stats,
-       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
-       TP_ARGS(wiphy, netdev)
-);
-
 DEFINE_EVENT(wiphy_netdev_evt, rdev_sched_scan_stop,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev),
        TP_ARGS(wiphy, netdev)
@@ -1439,11 +1429,6 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
                  WIPHY_PR_ARG, __entry->tx, __entry->rx)
 );
 
-DEFINE_EVENT(tx_rx_evt, rdev_set_ringparam,
-       TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
-       TP_ARGS(wiphy, rx, tx)
-);
-
 DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
        TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
        TP_ARGS(wiphy, rx, tx)
@@ -1469,9 +1454,9 @@ TRACE_EVENT(rdev_tdls_mgmt,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
                 u8 *peer, u8 action_code, u8 dialog_token,
                 u16 status_code, u32 peer_capability,
-                const u8 *buf, size_t len),
+                bool initiator, const u8 *buf, size_t len),
        TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
-               peer_capability, buf, len),
+               peer_capability, initiator, buf, len),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                NETDEV_ENTRY
@@ -1480,6 +1465,7 @@ TRACE_EVENT(rdev_tdls_mgmt,
                __field(u8, dialog_token)
                __field(u16, status_code)
                __field(u32, peer_capability)
+               __field(bool, initiator)
                __dynamic_array(u8, buf, len)
        ),
        TP_fast_assign(
@@ -1490,13 +1476,16 @@ TRACE_EVENT(rdev_tdls_mgmt,
                __entry->dialog_token = dialog_token;
                __entry->status_code = status_code;
                __entry->peer_capability = peer_capability;
+               __entry->initiator = initiator;
                memcpy(__get_dynamic_array(buf), buf, len);
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, "
-                 "dialog_token: %u, status_code: %u, peer_capability: %u buf: %#.2x ",
+                 "dialog_token: %u, status_code: %u, peer_capability: %u "
+                 "initiator: %s buf: %#.2x ",
                  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
                  __entry->action_code, __entry->dialog_token,
                  __entry->status_code, __entry->peer_capability,
+                 BOOL_TO_STR(__entry->initiator),
                  ((u8 *)__get_dynamic_array(buf))[0])
 );
 
@@ -1725,40 +1714,6 @@ TRACE_EVENT(rdev_set_noack_map,
                  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
 );
 
-TRACE_EVENT(rdev_get_et_sset_count,
-       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int sset),
-       TP_ARGS(wiphy, netdev, sset),
-       TP_STRUCT__entry(
-               WIPHY_ENTRY
-               NETDEV_ENTRY
-               __field(int, sset)
-       ),
-       TP_fast_assign(
-               WIPHY_ASSIGN;
-               NETDEV_ASSIGN;
-               __entry->sset = sset;
-       ),
-       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %d",
-                 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
-);
-
-TRACE_EVENT(rdev_get_et_strings,
-       TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 sset),
-       TP_ARGS(wiphy, netdev, sset),
-       TP_STRUCT__entry(
-               WIPHY_ENTRY
-               NETDEV_ENTRY
-               __field(u32, sset)
-       ),
-       TP_fast_assign(
-               WIPHY_ASSIGN;
-               NETDEV_ASSIGN;
-               __entry->sset = sset;
-       ),
-       TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", sset: %u",
-                 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sset)
-);
-
 DEFINE_EVENT(wiphy_wdev_evt, rdev_get_channel,
        TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
        TP_ARGS(wiphy, wdev)