Merge tag 'upstream-4.20-rc7' of git://git.infradead.org/linux-ubifs
author: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Dec 2018 22:17:24 +0000 (14:17 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Thu, 20 Dec 2018 22:17:24 +0000 (14:17 -0800)
Pull UBI/UBIFS fixes from Richard Weinberger:

 - Kconfig dependency fixes for our new auth feature

 - Fix for selecting the right compressor when creating a fs

 - Bugfix in UBIFS's O_TMPFILE implementation

 - Refcounting fixes for UBI

* tag 'upstream-4.20-rc7' of git://git.infradead.org/linux-ubifs:
  ubifs: Handle re-linking of inodes correctly while recovery
  ubi: Do not drop UBI device reference before using
  ubi: Put MTD device after it is not used
  ubifs: Fix default compression selection in ubifs
  ubifs: Fix memory leak on error condition
  ubifs: auth: Add CONFIG_KEYS dependency
  ubifs: CONFIG_UBIFS_FS_AUTHENTICATION should depend on UBIFS_FS
  ubifs: replay: Fix high stack usage

314 files changed:
CREDITS
Documentation/core-api/xarray.rst
Documentation/media/uapi/v4l/extended-controls.rst
MAINTAINERS
Makefile
arch/alpha/kernel/setup.c
arch/alpha/mm/numa.c
arch/arm/boot/dts/arm-realview-pb1176.dts
arch/arm/boot/dts/arm-realview-pb11mp.dts
arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
arch/arm/boot/dts/bcm2837-rpi-3-b.dts
arch/arm/boot/dts/imx7d-nitrogen7.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
arch/arm/mach-imx/cpuidle-imx6sx.c
arch/arm/mach-mmp/cputype.h
arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
arch/arm64/boot/dts/mediatek/mt7622.dtsi
arch/arm64/include/asm/memory.h
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/m68k/kernel/setup_mm.c
arch/m68k/mm/motorola.c
arch/powerpc/boot/Makefile
arch/powerpc/boot/crt0.S
arch/powerpc/include/asm/perf_event.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/bpf_perf_event.h [new file with mode: 0644]
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/msi.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/dump_linuxpagetables.c
arch/powerpc/mm/init_64.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/platforms/pseries/papr_scm.c
arch/sh/include/asm/io.h
arch/x86/include/asm/msr-index.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/bio.c
block/blk-zoned.c
drivers/clk/qcom/gcc-qcs404.c
drivers/crypto/chelsio/chtls/chtls.h
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/crypto/chelsio/chtls/chtls_main.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/intel_workarounds.h
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
drivers/hid/hid-ids.h
drivers/hid/hid-ite.c
drivers/hid/hid-quirks.c
drivers/hv/Kconfig
drivers/hv/vmbus_drv.c
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/odp.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/media/Kconfig
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/media-device.c
drivers/media/platform/vicodec/vicodec-core.c
drivers/media/platform/vivid/vivid-sdr-cap.c
drivers/media/platform/vivid/vivid-vbi-cap.c
drivers/media/platform/vivid/vivid-vbi-out.c
drivers/media/platform/vivid/vivid-vid-cap.c
drivers/media/platform/vivid/vivid-vid-out.c
drivers/media/platform/vsp1/vsp1_lif.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/omap.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/phy/phy_device.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/marvell/mwifiex/11n.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/xen-netfront.c
drivers/pci/pcie/aer.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/qcom/pinctrl-sdm660.c
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/qla2xxx/qla_os.c
drivers/staging/media/sunxi/cedrus/Kconfig
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
drivers/thermal/hisi_thermal.c
drivers/thermal/st/stm_thermal.c
drivers/tty/serial/8250/8250_port.c
drivers/uio/uio_hv_generic.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci.h
drivers/usb/serial/option.c
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/video/backlight/pwm_bl.c
fs/aio.c
fs/ceph/super.c
fs/ceph/super.h
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/inode.c
fs/userfaultfd.c
include/asm-generic/fixmap.h
include/linux/filter.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netfilter/nfnetlink.h
include/linux/t10-pi.h
include/linux/xarray.h
include/media/mpeg2-ctrls.h [new file with mode: 0644]
include/media/v4l2-ctrls.h
include/media/videobuf2-core.h
include/net/ip_tunnels.h
include/net/sock.h
include/net/tls.h
include/net/xfrm.h
include/uapi/asm-generic/Kbuild.asm
include/uapi/linux/blkzoned.h
include/uapi/linux/if_tunnel.h
include/uapi/linux/in.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/net_tstamp.h
include/uapi/linux/netlink.h
include/uapi/linux/v4l2-controls.h
include/uapi/linux/videodev2.h
init/Kconfig
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/dma/direct.c
kernel/trace/ftrace.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_trigger.c
lib/radix-tree.c
lib/test_xarray.c
lib/xarray.c
mm/hugetlb.c
mm/memblock.c
mm/shmem.c
mm/sparse.c
net/can/raw.c
net/core/flow_dissector.c
net/core/gro_cells.c
net/core/neighbour.c
net/core/sysctl_net_core.c
net/ipv4/devinet.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/raw.c
net/ipv6/ip6_output.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/raw.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/status.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_seqadj.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/rds/message.c
net/rds/rdma.c
net/rds/rds.h
net/rds/send.c
net/sched/cls_flower.c
net/sctp/ipv6.c
net/smc/af_smc.c
net/smc/smc.h
net/sunrpc/clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/socket.c
net/tipc/udp_media.c
net/tls/tls_main.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/checkstack.pl
scripts/spdxcheck.py
security/integrity/ima/ima_policy.c
security/keys/keyctl_pkey.c
security/keys/trusted.c
sound/firewire/fireface/ff-protocol-ff400.c
sound/pci/hda/patch_realtek.c
tools/include/uapi/linux/netlink.h
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/main.c
tools/testing/radix-tree/regression.h
tools/testing/radix-tree/regression4.c [new file with mode: 0644]
tools/testing/selftests/bpf/bpf_flow.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/test_vxlan_fdb_changelink.sh [new file with mode: 0755]
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/virtio/linux/kernel.h
virt/kvm/coalesced_mmio.c

diff --git a/CREDITS b/CREDITS
index c9273393fe14c12d1eb32f2ce65aba527a00c928..7d397ee675242954fa00269477522cdfdd885068 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2541,6 +2541,10 @@ S: Ormond
 S: Victoria 3163
 S: Australia
 
+N: Eric Miao
+E: eric.y.miao@gmail.com
+D: MMP support
+
 N: Pauline Middelink
 E: middelin@polyware.nl
 D: General low-level bug fixes, /proc fixes, identd support
@@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Haojian Zhuang
+E: haojian.zhuang@gmail.com
+D: MMP support
+
 N: Richard Zidlicky
 E: rz@linux-m68k.org, rdzidlic@geocities.com
 W: http://www.geocities.com/rdzidlic
index dbe96cb5558ef5024fd9607a902c25ad430b4cb7..6a6d67acaf690abfadc40c05e97a4557060dc188 100644 (file)
@@ -187,6 +187,8 @@ Takes xa_lock internally:
  * :c:func:`xa_erase_bh`
  * :c:func:`xa_erase_irq`
  * :c:func:`xa_cmpxchg`
+ * :c:func:`xa_cmpxchg_bh`
+ * :c:func:`xa_cmpxchg_irq`
  * :c:func:`xa_store_range`
  * :c:func:`xa_alloc`
  * :c:func:`xa_alloc_bh`
@@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
 context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
 in the interrupt handler.  Some of the more common patterns have helper
 functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
-:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
+and :c:func:`xa_cmpxchg_irq`.
 
 Sometimes you need to protect access to the XArray with a mutex because
 that lock sits above another mutex in the locking hierarchy.  That does
index 65a1d873196b6e741fc57da5658bebdc1a8f9d25..027358b91082534edb2f6da8955689ec20868ba3 100644 (file)
@@ -1505,6 +1505,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     configuring a stateless hardware decoding pipeline for MPEG-2.
     The bitstream parameters are defined according to :ref:`mpeg2part2`.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_slice_params
 
 .. cssclass:: longtable
@@ -1625,6 +1630,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
     Specifies quantization matrices (as extracted from the bitstream) for the
     associated MPEG-2 slice data.
 
+    .. note::
+
+       This compound control is not yet part of the public kernel API and
+       it is expected to change.
+
 .. c:type:: v4l2_ctrl_mpeg2_quantization
 
 .. cssclass:: longtable
index 8119141a926f3a577b0a351caa71bd39a894f65f..21ce799084182b62e4ce37ccba6ab3dccc2d24e7 100644 (file)
@@ -1739,13 +1739,17 @@ ARM/Mediatek SoC support
 M:     Matthias Brugger <matthias.bgg@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+W:     https://mtk.bcnfs.org/
+C:     irc://chat.freenode.net/linux-mediatek
 S:     Maintained
 F:     arch/arm/boot/dts/mt6*
 F:     arch/arm/boot/dts/mt7*
 F:     arch/arm/boot/dts/mt8*
 F:     arch/arm/mach-mediatek/
 F:     arch/arm64/boot/dts/mediatek/
+F:     drivers/soc/mediatek/
 N:     mtk
+N:     mt[678]
 K:     mediatek
 
 ARM/Mediatek USB3 PHY DRIVER
@@ -4843,6 +4847,7 @@ F:        include/uapi/drm/vmwgfx_drm.h
 
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
+M:     Daniel Vetter <daniel@ffwll.ch>
 L:     dri-devel@lists.freedesktop.org
 T:     git git://anongit.freedesktop.org/drm/drm
 B:     https://bugs.freedesktop.org/
@@ -6901,8 +6906,10 @@ Hyper-V CORE AND DRIVERS
 M:     "K. Y. Srinivasan" <kys@microsoft.com>
 M:     Haiyang Zhang <haiyangz@microsoft.com>
 M:     Stephen Hemminger <sthemmin@microsoft.com>
+M:     Sasha Levin <sashal@kernel.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
 L:     devel@linuxdriverproject.org
-S:     Maintained
+S:     Supported
 F:     Documentation/networking/netvsc.txt
 F:     arch/x86/include/asm/mshyperv.h
 F:     arch/x86/include/asm/trace/hyperv.h
@@ -8938,7 +8945,7 @@ F:        arch/mips/boot/dts/img/pistachio_marduk.dts
 
 MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Vivien Didelot <vivien.didelot@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/dsa/mv88e6xxx/
@@ -9443,6 +9450,13 @@ F:       drivers/media/platform/mtk-vpu/
 F:     Documentation/devicetree/bindings/media/mediatek-vcodec.txt
 F:     Documentation/devicetree/bindings/media/mediatek-vpu.txt
 
+MEDIATEK MT76 WIRELESS LAN DRIVER
+M:     Felix Fietkau <nbd@nbd.name>
+M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wireless/mediatek/mt76/
+
 MEDIATEK MT7601U WIRELESS LAN DRIVER
 M:     Jakub Kicinski <kubakici@wp.pl>
 L:     linux-wireless@vger.kernel.org
@@ -10005,12 +10019,9 @@ S:     Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MMP SUPPORT
-M:     Eric Miao <eric.y.miao@gmail.com>
-M:     Haojian Zhuang <haojian.zhuang@gmail.com>
+R:     Lubomir Rintel <lkundrak@v3.sk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:     git git://github.com/hzhuang1/linux.git
-T:     git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
@@ -10416,7 +10427,7 @@ F:      drivers/net/wireless/
 
 NETWORKING [DSA]
 M:     Andrew Lunn <andrew@lunn.ch>
-M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M:     Vivien Didelot <vivien.didelot@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/dsa/
index f2c3423c3062f2b704c239621d2093cc45280060..d45856f80057d61e66fed31e48ca858c08b3d88d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
@@ -962,11 +962,6 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    ifdef CONFIG_UNWINDER_ORC
-      $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    else
-      $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
-    endif
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -1125,6 +1120,14 @@ uapi-asm-generic:
 
 PHONY += prepare-objtool
 prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+       @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+       @false
+else
+       @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
 
 # Generate some files
 # ---------------------------------------------------------------------------
index a37fd990bd5548933d89ce3949341f9e8a17d008..4b5b1b244f86a108bdda89f33316e84c77950df3 100644 (file)
@@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
 
        /* Find our memory.  */
        setup_memory(kernel_end);
+       memblock_set_bottom_up(true);
 
        /* First guess at cpu cache sizes.  Do this before init_arch.  */
        determine_cpu_caches(cpu->type);
index 74846553e3f18682a958bf6373923830f2b8168a..d0b73371e985ce70a5d7f0845b14911f2a1d62cb 100644 (file)
@@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
        if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
                panic("kernel loaded out of ram");
 
+       memblock_add(PFN_PHYS(node_min_pfn),
+                    (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
+
        /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
           Note that we round this down, not up - node memory
           has much larger alignment than 8Mb, so it's safe. */
        node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
 
-       memblock_add(PFN_PHYS(node_min_pfn),
-                    (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
-
        NODE_DATA(nid)->node_start_pfn = node_min_pfn;
        NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
 
index f2a1d25eb6cf3f1249bcc2166bee7f86a75b2a6d..83e0fbc4a1a10cf4e4b9418b9646d7f06aaea181 100644 (file)
@@ -45,7 +45,7 @@
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 7f9cbdf33a51009ffea23fa488ec8f1e6abe7914..2f6aa24a0b67c707068bba9fb7902525ff1158c0 100644 (file)
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 4adb85e66be3f975894cb712211478439709f274..93762244be7f469a64d9158b2a904e0b8cde1fdd 100644 (file)
@@ -31,7 +31,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index c318bcbc6ba7e327bcf164fd543cd47c04c52d2c..89e6fd547c7572f6bc7d243e5055da4e33dc94a2 100644 (file)
@@ -26,7 +26,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index d8aac4a2d02a2489d1843e2d22f2f4cb317eb481..177d21fdeb288d3e458176ddf0d537a746f0d2ba 100644 (file)
                compatible = "regulator-fixed";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-               clock-names = "slow";
                regulator-name = "reg_wlan";
                startup-delay-us = <70000>;
                gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
                enable-active-high;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
 };
 
 &adc1 {
        bus-width = <4>;
        non-removable;
        vmmc-supply = <&reg_wlan>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        cap-power-off-card;
        keep-power-in-suspend;
        status = "okay";
index 21973eb55671920148e25e6a6b0e1c469093bc8e..f27b3849d3ff3ed91d7e2d504206c4c8a7d8045b 100644 (file)
                regulator-min-microvolt = <1800000>;
                regulator-max-microvolt = <1800000>;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
+};
+
+&clks {
+       assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+                         <&clks IMX7D_CLKO2_ROOT_DIV>;
+       assigned-clock-parents = <&clks IMX7D_CKIL>;
+       assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
 
 &usdhc2 { /* Wifi SDIO */
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_usdhc2>;
+       pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
        no-1-8-v;
        non-removable;
        keep-power-in-suspend;
        wakeup-source;
        vmmc-supply = <&reg_ap6212>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        status = "okay";
 };
 
 };
 
 &iomuxc_lpsr {
+       pinctrl_wifi_clk: wificlkgrp {
+               fsl,pins = <
+                       MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2     0x7d
+               >;
+       };
+
        pinctrl_wdog: wdoggrp {
                fsl,pins = <
                        MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B  0x74
index 742d2946b08be48d205bee2ae6041632b27dccf1..583a5a01642f2f36dc8887acf869c0480f47162a 100644 (file)
 
 &reg_dldo3 {
        regulator-always-on;
-       regulator-min-microvolt = <2500000>;
-       regulator-max-microvolt = <2500000>;
+       regulator-min-microvolt = <3300000>;
+       regulator-max-microvolt = <3300000>;
        regulator-name = "vcc-pd";
 };
 
index 243a108a940b46c9c0d9b13d2802beb11f84d2d9..fd0053e47a151179db824e1aac6fe1526d20327e 100644 (file)
@@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
         * except for power up sw2iso which need to be
         * larger than LDO ramp up time.
         */
-       imx_gpc_set_arm_power_up_timing(2, 1);
+       imx_gpc_set_arm_power_up_timing(0xf, 1);
        imx_gpc_set_arm_power_down_timing(1, 1);
 
        return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
index 446edaeb78a71d07a8c719732455589ffa67b49e..a96abcf521b4b095a13658e51f409884d89b35b5 100644 (file)
@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()        (0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-       return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+       return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+               (((mmp_chip_id & 0xfff) == 0x410) ||
+                ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()  (0)
index 64632c8738887804df7a81d5f2b5715280bfffb6..01ea662afba876638c1d44fc0d89f3e52ad4e37e 100644 (file)
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x000>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x001>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu2: cpu@100 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x100>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu3: cpu@101 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x101>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
        };
 };
index 073610ac0a53e8dcd1786b4b8c8c59b7b955ef50..7d94c1fa592a064d2d42709ce9d6a198ef008eb6 100644 (file)
                method = "smc";
        };
 
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               idle_states {
-                       entry_method = "arm,pcsi";
-
-                       CPU_SLEEP_0: cpu-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x0010000>;
-                               entry-latency-us = <80>;
-                               exit-latency-us  = <160>;
-                               min-residency-us = <320>;
-                       };
-
-                       CLUSTER_SLEEP_0: cluster-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x1010000>;
-                               entry-latency-us = <500>;
-                               exit-latency-us = <1000>;
-                               min-residency-us = <2500>;
-                       };
-               };
-       };
-
        ap806 {
                #address-cells = <2>;
                #size-cells = <2>;
index 5d6005c9b097529522b9112d3682d636b633f61b..710c5c3d87d30ef7dcb1657bb2081883f8b04aa0 100644 (file)
        model = "Bananapi BPI-R64";
        compatible = "bananapi,bpi-r64", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index dcad0869b84ca01dc76e2924506583daf1c65c54..3f783348c66a690f3acce45ff76bd5da7c14f48a 100644 (file)
        model = "MediaTek MT7622 RFB1 board";
        compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index fe0c875f1d9513538e5a18c74f641557d89675c8..14a1028ca3a64bd54bd21608655dcf1895d25fa1 100644 (file)
                #reset-cells = <1>;
        };
 
-       timer: timer@10004000 {
-               compatible = "mediatek,mt7622-timer",
-                            "mediatek,mt6577-timer";
-               reg = <0 0x10004000 0 0x80>;
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
-               clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
-                        <&topckgen CLK_TOP_RTC>;
-               clock-names = "system-clk", "rtc-clk";
-       };
-
        scpsys: scpsys@10006000 {
                compatible = "mediatek,mt7622-scpsys",
                             "syscon";
index b96442960aead1692e94d28dd35d33e7954d84b3..f0a5c9531e8bbb455a2ebb94f46ee4efade8e854 100644 (file)
  */
 #define PCI_IO_SIZE            SZ_16M
 
-/*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT  6
-
 /*
  * VMEMMAP_SIZE - allows the whole linear region to be covered by
  *                a struct page array
index a3ac262848451ae49535c37a6997a211b0f5e914..a5370440609917c10fb2f2c4ffbc26f5e1a1ba9f 100644 (file)
@@ -429,9 +429,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
-                       memset(addr, 0, size);
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
+                       memset(addr, 0, size);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
index 9b432d9fcada8dac8e7b1041437387f29785b2af..0340e45655c687ebc3b223cdee374d97e0e722c7 100644 (file)
@@ -610,14 +610,6 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32                       > TASK_SIZE_64);
 #endif
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-       /*
-        * Make sure we chose the upper bound of sizeof(struct page)
-        * correctly when sizing the VMEMMAP array.
-        */
-       BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
-#endif
-
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
index a1a3eaeaf58c960df8e406dca1fcdf0e434470a2..ad0195cbe04255eada56bc8cff364086dc38642e 100644 (file)
@@ -164,8 +164,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                        be32_to_cpu(m->addr);
                                m68k_memory[m68k_num_memory].size =
                                        be32_to_cpu(m->size);
-                               memblock_add(m68k_memory[m68k_num_memory].addr,
-                                            m68k_memory[m68k_num_memory].size);
                                m68k_num_memory++;
                        } else
                                pr_warn("%s: too many memory chunks\n",
index 7497cf30bf1cd41b51afd915a45b5b80fd9dd939..3f3d0bf360910c0d45095a0ef51db604afe5b02b 100644 (file)
@@ -228,6 +228,7 @@ void __init paging_init(void)
 
        min_addr = m68k_memory[0].addr;
        max_addr = min_addr + m68k_memory[0].size;
+       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
        for (i = 1; i < m68k_num_memory;) {
                if (m68k_memory[i].addr < min_addr) {
                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@ void __init paging_init(void)
                                (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
                        continue;
                }
+               memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
                addr = m68k_memory[i].addr + m68k_memory[i].size;
                if (addr > max_addr)
                        max_addr = addr;
index 39354365f54a5ebcbf0b087155e698ebf8337137..ed9883169190577f86bc77059eaefdd5d1f3caff 100644 (file)
@@ -197,7 +197,7 @@ $(obj)/empty.c:
 $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
        $(Q)cp $< $@
 
-$(obj)/serial.c: $(obj)/autoconf.h
+$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
 
 $(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
        $(Q)cp $< $@
index 32dfe6d083f3257c41b8e0de835eff8ccd16491c..9b9d17437373bfd7ce821c87e4a98c36e7ef371a 100644 (file)
@@ -15,7 +15,7 @@
 RELA = 7
 RELACOUNT = 0x6ffffff9
 
-       .text
+       .data
        /* A procedure descriptor used when booting this as a COFF file.
         * When making COFF, this comes first in the link and we're
         * linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
        .globl  _zimage_start_opd
 _zimage_start_opd:
        .long   0x500000, 0, 0, 0
+       .text
+       b       _zimage_start
 
 #ifdef __powerpc64__
 .balign 8
index 8bf1b6351716ee424d80bf240bbcea54abc35c9e..16a49819da9a993046e5cf15c7de2ef523e7c2fe 100644 (file)
@@ -26,6 +26,8 @@
 #include <asm/ptrace.h>
 #include <asm/reg.h>
 
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+
 /*
  * Overload regs->result to specify whether we should use the MSR (result
  * is zero) or the SIAR (result is non zero).
index a658091a19f901c8283cba392e17da0e960970d2..3712152206f3ef8b2b9ca73d76f05af2d46688fc 100644 (file)
@@ -1,7 +1,6 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-generic-y += bpf_perf_event.h
 generic-y += param.h
 generic-y += poll.h
 generic-y += resource.h
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
new file mode 100644 (file)
index 0000000..b551b74
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
index 33b34a58fc62f50b9d7cb8ad1e82c6eab46bb39f..5b9dce17f0c926c9f4c9c838a06658920086bcdb 100644 (file)
@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
 
        /* Now find out if one of these is out firmware console */
        path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+       if (path == NULL)
+               path = of_get_property(of_chosen, "stdout-path", NULL);
        if (path != NULL) {
                stdout = of_find_node_by_path(path);
                if (stdout)
@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
        /* We are getting a weird phandle from OF ... */
        /* ... So use the full path instead */
        name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+       if (name == NULL)
+               name = of_get_property(of_chosen, "stdout-path", NULL);
        if (name == NULL) {
-               DBG(" no linux,stdout-path !\n");
+               DBG(" no stdout-path !\n");
                return -ENODEV;
        }
        prom_stdout = of_find_node_by_path(name);
index dab616a33b8dbe283aa46c05b5492af69190650f..f2197654be070721abd7bd263b68ae2ddd8094eb 100644 (file)
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 {
        struct pci_controller *phb = pci_bus_to_host(dev->bus);
 
-       phb->controller_ops.teardown_msi_irqs(dev);
+       /*
+        * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+        * so check the pointer again.
+        */
+       if (phb->controller_ops.teardown_msi_irqs)
+               phb->controller_ops.teardown_msi_irqs(dev);
 }
index afb819f4ca68bee0f88fab357ffe9395508dc106..714c3480c52dcca8761a9d9e2a24e9eaad0a6256 100644 (file)
@@ -3266,12 +3266,17 @@ long do_syscall_trace_enter(struct pt_regs *regs)
        user_exit();
 
        if (test_thread_flag(TIF_SYSCALL_EMU)) {
-               ptrace_report_syscall(regs);
                /*
+                * A nonzero return code from tracehook_report_syscall_entry()
+                * tells us to prevent the syscall execution, but we are not
+                * going to execute it anyway.
+                *
                 * Returning -1 will skip the syscall execution. We want to
                 * avoid clobbering any register also, thus, not 'gotoing'
                 * skip label.
                 */
+               if (tracehook_report_syscall_entry(regs))
+                       ;
                return -1;
        }
 
index 2b74f8adf4d009b101fde1c3873c1bd4ec0833a4..6aa41669ac1aec6b0db81da31d45b47bbade30c1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <asm/fixmap.h>
index 7a9886f98b0c12e8df43fe0e8a897a7d4cbeac9d..a5091c03474753111f77df8de2910152ee38abb8 100644 (file)
@@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
        for (; start < end; start += page_size) {
-               void *p;
+               void *p = NULL;
                int rc;
 
                if (vmemmap_populated(start, page_size))
                        continue;
 
+               /*
+                * Allocate from the altmap first if we have one. This may
+                * fail due to alignment issues when using 16MB hugepages, so
+                * fall back to system memory if the altmap allocation fail.
+                */
                if (altmap)
                        p = altmap_alloc_block_buf(page_size, altmap);
-               else
+               if (!p)
                        p = vmemmap_alloc_block_buf(page_size, node);
                if (!p)
                        return -ENOMEM;
@@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
 {
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long page_order = get_order(page_size);
+       unsigned long alt_start = ~0, alt_end = ~0;
+       unsigned long base_pfn;
 
        start = _ALIGN_DOWN(start, page_size);
+       if (altmap) {
+               alt_start = altmap->base_pfn;
+               alt_end = altmap->base_pfn + altmap->reserve +
+                         altmap->free + altmap->alloc + altmap->align;
+       }
 
        pr_debug("vmemmap_free %lx...%lx\n", start, end);
 
@@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
                page = pfn_to_page(addr >> PAGE_SHIFT);
                section_base = pfn_to_page(vmemmap_section_start(start));
                nr_pages = 1 << page_order;
+               base_pfn = PHYS_PFN(addr);
 
-               if (altmap) {
+               if (base_pfn >= alt_start && base_pfn < alt_end) {
                        vmem_altmap_free(altmap, nr_pages);
                } else if (PageReserved(page)) {
                        /* allocated from bootmem */
index 2e4bd32154b5d756396e3793ed7e6066cd572af3..472b784f01ebf0b9106a0753d191c059f3406307 100644 (file)
@@ -140,8 +140,7 @@ config IBMEBUS
          Bus device driver for GX bus based adapters.
 
 config PAPR_SCM
-       depends on PPC_PSERIES && MEMORY_HOTPLUG
-       select LIBNVDIMM
+       depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
        tristate "Support for the PAPR Storage Class Memory interface"
        help
          Enable access to hypervisor provided storage class memory.
index ee9372b65ca5cf053762d34b0079276becd56779..7d6457ab5d3450f0db4d6cc25e2067c80db59f13 100644 (file)
@@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
        do {
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
-               token = be64_to_cpu(ret[0]);
+               token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);
 
@@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                return -ENXIO;
        }
 
-       p->bound_addr = be64_to_cpu(ret[1]);
+       p->bound_addr = ret[1];
 
        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
 
@@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
        do {
                rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
                                p->bound_addr, p->blocks, token);
-               token = be64_to_cpu(ret);
+               token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);
 
@@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
                goto err;
        }
 
+       if (nvdimm_bus_check_dimm_count(p->bus, 1))
+               goto err;
+
        /* now add the region */
 
        memset(&mapping, 0, sizeof(mapping));
@@ -257,9 +260,12 @@ err:       nvdimm_bus_unregister(p->bus);
 
 static int papr_scm_probe(struct platform_device *pdev)
 {
-       uint32_t drc_index, metadata_size, unit_cap[2];
        struct device_node *dn = pdev->dev.of_node;
+       u32 drc_index, metadata_size;
+       u64 blocks, block_size;
        struct papr_scm_priv *p;
+       const char *uuid_str;
+       u64 uuid[2];
        int rc;
 
        /* check we have all the required DT properties */
@@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
-               dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
+       if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
+               dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
+               return -ENODEV;
+       }
+
+       if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
+               dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
+               return -ENODEV;
+       }
+
+       if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
+               dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
                return -ENODEV;
        }
 
@@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev)
 
        p->dn = dn;
        p->drc_index = drc_index;
-       p->block_size = unit_cap[0];
-       p->blocks     = unit_cap[1];
+       p->block_size = block_size;
+       p->blocks = blocks;
+
+       /* We just need to ensure that set cookies are unique across */
+       uuid_parse(uuid_str, (uuid_t *) uuid);
+       p->nd_set.cookie1 = uuid[0];
+       p->nd_set.cookie2 = uuid[1];
 
        /* might be zero */
        p->metadata_size = metadata_size;
@@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev)
 
        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
-       p->res.end   = p->bound_addr + p->blocks * p->block_size;
+       p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
        p->res.name  = pdev->name;
        p->res.flags = IORESOURCE_MEM;
 
index 98cb8c802b1a8cccafb1cd52d4717a149490792c..4f7f235f15f856775ee2fbdae67e06ad20fdc363 100644 (file)
@@ -24,6 +24,7 @@
 #define __IO_PREFIX     generic
 #include <asm/io_generic.h>
 #include <asm/io_trapped.h>
+#include <asm-generic/pci_iomap.h>
 #include <mach/mangle-port.h>
 
 #define __raw_writeb(v,a)      (__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
index c8f73efb4eceb82391bf908f1f8f292586be5925..9e39cc8bd989855cc2ca8c349c335fe36d029b37 100644 (file)
 #define MSR_F15H_NB_PERF_CTR           0xc0010241
 #define MSR_F15H_PTSC                  0xc0010280
 #define MSR_F15H_IC_CFG                        0xc0011021
+#define MSR_F15H_EX_CFG                        0xc001102c
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 02edd9960e9d94cf8cbac80ea1bfccc5673f3089..8d5d984541beaa111b94e59e512871f91c08a7da 100644 (file)
@@ -11985,6 +11985,8 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                        kunmap(vmx->nested.pi_desc_page);
                        kvm_release_page_dirty(vmx->nested.pi_desc_page);
                        vmx->nested.pi_desc_page = NULL;
+                       vmx->nested.pi_desc = NULL;
+                       vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
                }
                page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
                if (is_error_page(page))
index d02937760c3ba8adc6de37ed4b39db9a926f320d..f049ecfac7bb8a8cd7780efd9b40fbbdf8a7d089 100644 (file)
@@ -2426,6 +2426,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_PATCH_LOADER:
        case MSR_AMD64_BU_CFG2:
        case MSR_AMD64_DC_CFG:
+       case MSR_F15H_EX_CFG:
                break;
 
        case MSR_IA32_UCODE_REV:
@@ -2721,6 +2722,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_BU_CFG2:
        case MSR_IA32_PERF_CTL:
        case MSR_AMD64_DC_CFG:
+       case MSR_F15H_EX_CFG:
                msr_info->data = 0;
                break;
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -7446,7 +7448,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
-       if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+       if (!kvm_apic_present(vcpu))
                return;
 
        bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
index 4f4d9884443b63a8f002ddd754ea467f9a0e4c16..4d86e90654b20ff284df67ad594fd18013a14b60 100644 (file)
@@ -1261,7 +1261,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
                if (ret)
                        goto cleanup;
        } else {
-               zero_fill_bio(bio);
+               if (bmd->is_our_pages)
+                       zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }
 
index 13ba2011a306a8c4c52fe07bc7a5b147e86472a6..a327bef076422e52501be27ad4c7c4dee42dbb64 100644 (file)
@@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
        struct page *page;
        int order;
 
-       for (order = get_order(size); order > 0; order--) {
+       for (order = get_order(size); order >= 0; order--) {
                page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
                if (page) {
                        *nr_zones = min_t(unsigned int, *nr_zones,
index ef1b267cb058a4a03f0ead86218ee165653fd737..64da032bb9edb35d571f013ca37582179493961c 100644 (file)
@@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
                .hw.init = &(struct clk_init_data){
                        .name = "gpll0_out_main",
                        .parent_names = (const char *[])
-                                       { "gpll0_sleep_clk_src" },
+                                       { "cxo" },
                        .num_parents = 1,
                        .ops = &clk_alpha_pll_ops,
                },
index 7725b6ee14efb2ecc89d9c0822aa19903284d3f5..59bb67d5a7cede7198c652b53550941754cb9a3e 100644 (file)
@@ -153,6 +153,11 @@ struct chtls_dev {
        unsigned int cdev_state;
 };
 
+struct chtls_listen {
+       struct chtls_dev *cdev;
+       struct sock *sk;
+};
+
 struct chtls_hws {
        struct sk_buff_head sk_recv_queue;
        u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
        u16 resv2;
        u32 delack_mode;
        u32 delack_seq;
+       u32 snd_win;
+       u32 rcv_win;
 
        void *passive_reap_next;        /* placeholder for passive */
        struct chtls_hws tlshws;
index 20209e29f814659227ef11861571c9ed6d7ace9d..931b96c220af973f450cf0b34a37924d40656e9f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kallsyms.h>
 #include <linux/kprobes.h>
 #include <linux/if_vlan.h>
+#include <net/inet_common.h>
 #include <net/tcp.h>
 #include <net/dst.h>
 
@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
        return mtu_idx;
 }
 
-static unsigned int select_rcv_wnd(struct chtls_sock *csk)
-{
-       unsigned int rcvwnd;
-       unsigned int wnd;
-       struct sock *sk;
-
-       sk = csk->sk;
-       wnd = tcp_full_space(sk);
-
-       if (wnd < MIN_RCV_WND)
-               wnd = MIN_RCV_WND;
-
-       rcvwnd = MAX_RCV_WND;
-
-       csk_set_flag(csk, CSK_UPDATE_RCV_WND);
-       return min(wnd, rcvwnd);
-}
-
 static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
 {
        int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
        csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
                                        req);
        opt0 = TCAM_BYPASS_F |
-              WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+              WND_SCALE_V(RCV_WSCALE(tp)) |
               MSS_IDX_V(csk->mtu_idx) |
               L2T_IDX_V(csk->l2t_entry->idx) |
               NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+       struct net_device *ndev = csk->egress_dev;
+       struct port_info *pi = netdev_priv(ndev);
+       unsigned int linkspeed;
+       u8 scale;
+
+       linkspeed = pi->link_cfg.speed;
+       scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+       csk->rcv_win = CHTLS_10G_RCVWIN;
+       if (scale)
+               csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+       csk->snd_win = CHTLS_10G_SNDWIN;
+       if (scale)
+               csk->snd_win *= scale;
+}
+
 static struct sock *chtls_recv_sock(struct sock *lsk,
                                    struct request_sock *oreq,
                                    void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        csk->port_id = port_id;
        csk->egress_dev = ndev;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+       chtls_set_tcp_window(csk);
+       tp->rcv_wnd = csk->rcv_win;
+       csk->sndbuf = csk->snd_win;
        csk->ulp_mode = ULP_MODE_TLS;
        step = cdev->lldi->nrxq / cdev->lldi->nchan;
        csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1076,9 +1081,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        csk->sndbuf = newsk->sk_sndbuf;
        csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
                                         cxgb4_port_viid(ndev));
-       tp->rcv_wnd = select_rcv_wnd(csk);
        RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
-                                          WSCALE_OK(tp),
+                                          sock_net(newsk)->
+                                               ipv4.sysctl_tcp_window_scaling,
                                           tp->window_clamp);
        neigh_release(n);
        inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1130,6 +1135,7 @@ static void chtls_pass_accept_request(struct sock *sk,
        struct cpl_t5_pass_accept_rpl *rpl;
        struct cpl_pass_accept_req *req;
        struct listen_ctx *listen_ctx;
+       struct vlan_ethhdr *vlan_eh;
        struct request_sock *oreq;
        struct sk_buff *reply_skb;
        struct chtls_sock *csk;
@@ -1142,6 +1148,10 @@ static void chtls_pass_accept_request(struct sock *sk,
        unsigned int stid;
        unsigned int len;
        unsigned int tid;
+       bool th_ecn, ect;
+       __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+       u16 eth_hdr_len;
+       bool ecn_ok;
 
        req = cplhdr(skb) + RSS_HDR;
        tid = GET_TID(req);
@@ -1180,24 +1190,40 @@ static void chtls_pass_accept_request(struct sock *sk,
        oreq->mss = 0;
        oreq->ts_recent = 0;
 
-       eh = (struct ethhdr *)(req + 1);
-       iph = (struct iphdr *)(eh + 1);
+       eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+       if (eth_hdr_len == ETH_HLEN) {
+               eh = (struct ethhdr *)(req + 1);
+               iph = (struct iphdr *)(eh + 1);
+               network_hdr = (void *)(eh + 1);
+       } else {
+               vlan_eh = (struct vlan_ethhdr *)(req + 1);
+               iph = (struct iphdr *)(vlan_eh + 1);
+               network_hdr = (void *)(vlan_eh + 1);
+       }
        if (iph->version != 0x4)
                goto free_oreq;
 
-       network_hdr = (void *)(eh + 1);
        tcph = (struct tcphdr *)(iph + 1);
+       skb_set_network_header(skb, (void *)iph - (void *)req);
 
        tcp_rsk(oreq)->tfo_listener = false;
        tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
        chtls_set_req_port(oreq, tcph->source, tcph->dest);
-       inet_rsk(oreq)->ecn_ok = 0;
        chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
-       if (req->tcpopt.wsf <= 14) {
+       ip_dsfield = ipv4_get_dsfield(iph);
+       if (req->tcpopt.wsf <= 14 &&
+           sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
                inet_rsk(oreq)->wscale_ok = 1;
                inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
        }
        inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+       th_ecn = tcph->ece && tcph->cwr;
+       if (th_ecn) {
+               ect = !INET_ECN_is_not_ect(ip_dsfield);
+               ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+               if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+                       inet_rsk(oreq)->ecn_ok = 1;
+       }
 
        newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
        if (!newsk)
index afebbd87c4aa1d22ca179f558552cb2f410fcc0a..18f553fcc1673dda35c0aaf663c0670d7a24e458 100644 (file)
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
 
        req_wr->lsodisable_to_flags =
                        htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
-                             FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+                             TX_URG_V(skb_urgent(skb)) |
                              T6_TX_FORCE_F | wr_ulp_mode_force |
                              TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
                                         skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
                                FW_OFLD_TX_DATA_WR_SHOVE_F);
 
        req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
-                       FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
-                       FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
-                                       (sk, CSK_TX_MORE_DATA)) &&
-                                        skb_queue_empty(&csk->txq)));
+                       TX_URG_V(skb_urgent(skb)) |
+                       TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+                                  skb_queue_empty(&csk->txq)));
        req->plen = htonl(len);
 }
 
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        int mss, flags, err;
        int recordsz = 0;
        int copied = 0;
-       int hdrlen = 0;
        long timeo;
 
        lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
                        recordsz = tls_header_read(&hdr, &msg->msg_iter);
                        size -= TLS_HEADER_LENGTH;
-                       hdrlen += TLS_HEADER_LENGTH;
+                       copied += TLS_HEADER_LENGTH;
                        csk->tlshws.txleft = recordsz;
                        csk->tlshws.type = hdr.type;
                        if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
                        int off = TCP_OFF(sk);
                        bool merge;
 
-                       if (!page)
-                               goto wait_for_memory;
-
-                       pg_size <<= compound_order(page);
+                       if (page)
+                               pg_size <<= compound_order(page);
                        if (off < pg_size &&
                            skb_can_coalesce(skb, i, page, off)) {
                                merge = 1;
@@ -1187,7 +1183,7 @@ out:
                chtls_tcp_push(sk, flags);
 done:
        release_sock(sk);
-       return copied + hdrlen;
+       return copied;
 do_fault:
        if (!skb->len) {
                __skb_unlink(skb, &csk->txq);
index f472c51abe56ac7de376d58483a3508eabcc069b..563f8fe7686adc9c895dbb623cac3f7c9ae5a3a6 100644 (file)
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
 static int listen_notify_handler(struct notifier_block *this,
                                 unsigned long event, void *data)
 {
-       struct chtls_dev *cdev;
-       struct sock *sk;
-       int ret;
+       struct chtls_listen *clisten;
+       int ret = NOTIFY_DONE;
 
-       sk = data;
-       ret =  NOTIFY_DONE;
+       clisten = (struct chtls_listen *)data;
 
        switch (event) {
        case CHTLS_LISTEN_START:
+               ret = chtls_listen_start(clisten->cdev, clisten->sk);
+               kfree(clisten);
+               break;
        case CHTLS_LISTEN_STOP:
-               mutex_lock(&cdev_list_lock);
-               list_for_each_entry(cdev, &cdev_list, list) {
-                       if (event == CHTLS_LISTEN_START)
-                               ret = chtls_listen_start(cdev, sk);
-                       else
-                               chtls_listen_stop(cdev, sk);
-               }
-               mutex_unlock(&cdev_list_lock);
+               chtls_listen_stop(clisten->cdev, clisten->sk);
+               kfree(clisten);
                break;
        }
        return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
        return 0;
 }
 
-static int chtls_start_listen(struct sock *sk)
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+       struct chtls_listen *clisten;
        int err;
 
        if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
                return -EADDRNOTAVAIL;
 
        sk->sk_backlog_rcv = listen_backlog_rcv;
+       clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+       if (!clisten)
+               return -ENOMEM;
+       clisten->cdev = cdev;
+       clisten->sk = sk;
        mutex_lock(&notify_mutex);
        err = raw_notifier_call_chain(&listen_notify_list,
-                                     CHTLS_LISTEN_START, sk);
+                                     CHTLS_LISTEN_START, clisten);
        mutex_unlock(&notify_mutex);
        return err;
 }
 
-static void chtls_stop_listen(struct sock *sk)
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
 {
+       struct chtls_listen *clisten;
+
        if (sk->sk_protocol != IPPROTO_TCP)
                return;
 
+       clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+       if (!clisten)
+               return;
+       clisten->cdev = cdev;
+       clisten->sk = sk;
        mutex_lock(&notify_mutex);
        raw_notifier_call_chain(&listen_notify_list,
-                               CHTLS_LISTEN_STOP, sk);
+                               CHTLS_LISTEN_STOP, clisten);
        mutex_unlock(&notify_mutex);
 }
 
@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)
 
 static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
 {
+       struct chtls_dev *cdev = to_chtls_dev(dev);
+
        if (sk->sk_state == TCP_LISTEN)
-               return chtls_start_listen(sk);
+               return chtls_start_listen(cdev, sk);
        return 0;
 }
 
 static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
 {
+       struct chtls_dev *cdev = to_chtls_dev(dev);
+
        if (sk->sk_state == TCP_LISTEN)
-               chtls_stop_listen(sk);
+               chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+       int i;
+
+       tls_unregister_device(&cdev->tlsdev);
+       kvfree(cdev->kmap.addr);
+       idr_destroy(&cdev->hwtid_idr);
+       for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+               kfree_skb(cdev->rspq_skb_cache[i]);
+       kfree(cdev->lldi);
+       kfree_skb(cdev->askb);
+       kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+       struct chtls_dev *cdev;
+       struct tls_device *dev;
+
+       dev = container_of(kref, struct tls_device, kref);
+       cdev = to_chtls_dev(dev);
+       chtls_free_uld(cdev);
 }
 
 static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
        tlsdev->feature = chtls_inline_feature;
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
-       tls_register_device(&cdev->tlsdev);
+       tlsdev->release = chtls_dev_release;
+       kref_init(&tlsdev->kref);
+       tls_register_device(tlsdev);
        cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
-static void chtls_unregister_dev(struct chtls_dev *cdev)
-{
-       tls_unregister_device(&cdev->tlsdev);
-}
-
 static void process_deferq(struct work_struct *task_param)
 {
        struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
        return NULL;
 }
 
-static void chtls_free_uld(struct chtls_dev *cdev)
-{
-       int i;
-
-       chtls_unregister_dev(cdev);
-       kvfree(cdev->kmap.addr);
-       idr_destroy(&cdev->hwtid_idr);
-       for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
-               kfree_skb(cdev->rspq_skb_cache[i]);
-       kfree(cdev->lldi);
-       kfree_skb(cdev->askb);
-       kfree(cdev);
-}
-
 static void chtls_free_all_uld(void)
 {
        struct chtls_dev *cdev, *tmp;
 
        mutex_lock(&cdev_mutex);
        list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
-               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
-                       chtls_free_uld(cdev);
+               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+                       list_del(&cdev->list);
+                       kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+               }
        }
        mutex_unlock(&cdev_mutex);
 }
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
                mutex_lock(&cdev_mutex);
                list_del(&cdev->list);
                mutex_unlock(&cdev_mutex);
-               chtls_free_uld(cdev);
+               kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
                break;
        default:
                break;
index 8816c697b2053c7c28f119f1362443d7b9ad6e98..387f1cf1dc207b0adac288cbf3fa413cc219d7f1 100644 (file)
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                        case CHIP_TOPAZ:
                                if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
                                    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+                                   ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+                                   ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
                                        info->is_kicker = true;
                                        strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
                                } else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                if (type == CGS_UCODE_ID_SMU) {
                                        if (((adev->pdev->device == 0x67ef) &&
                                             ((adev->pdev->revision == 0xe0) ||
-                                             (adev->pdev->revision == 0xe2) ||
                                              (adev->pdev->revision == 0xe5))) ||
                                            ((adev->pdev->device == 0x67ff) &&
                                             ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                              (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67ef) &&
+                                                  (adev->pdev->revision == 0xe2)) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
                                }
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                              (adev->pdev->revision == 0xe7) ||
                                              (adev->pdev->revision == 0xef))) ||
                                            ((adev->pdev->device == 0x6fdf) &&
-                                            (adev->pdev->revision == 0xef))) {
+                                            ((adev->pdev->revision == 0xef) ||
+                                             (adev->pdev->revision == 0xff)))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-                                       } else
+                                       } else if ((adev->pdev->device == 0x67df) &&
+                                                  ((adev->pdev->revision == 0xe1) ||
+                                                   (adev->pdev->revision == 0xf7))) {
+                                               info->is_kicker = true;
+                                               strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+                                       } else {
                                                strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+                                       }
                                } else if (type == CGS_UCODE_ID_SMU_SK) {
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                }
                                break;
                        case CHIP_POLARIS12:
-                               strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               if (((adev->pdev->device == 0x6987) &&
+                                    ((adev->pdev->revision == 0xc0) ||
+                                     (adev->pdev->revision == 0xc3))) ||
+                                   ((adev->pdev->device == 0x6981) &&
+                                    ((adev->pdev->revision == 0x00) ||
+                                     (adev->pdev->revision == 0x01) ||
+                                     (adev->pdev->revision == 0x10)))) {
+                                       info->is_kicker = true;
+                                       strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+                               } else {
+                                       strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               }
                                break;
                        case CHIP_VEGAM:
                                strcpy(fw_name, "amdgpu/vegam_smc.bin");
index 663043c8f0f5710244cb4bc787a8cd19fc076b50..0acc8dee2cb8d3ba99ebcef8af507b91021523ea 100644 (file)
@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_chunk;
        }
 
+       mutex_lock(&p->ctx->lock);
+
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }
 
-       mutex_lock(&p->ctx->lock);
-
        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
index 8de55f7f1a3a3922b4a1ac2d17cf12cdd35d1fd6..74b611e8a1b10c88abfaa0eb66950a073a09bd7b 100644 (file)
@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Vega 12 */
        {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+       {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
        /* Raven */
index a9f18ea7e354377c4ed47532eaaf1257e11b695b..e4ded890b1cbcc950e68288d7bafa1dbfdcaf456 100644 (file)
@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x6864, &vega10_device_info },        /* Vega10 */
        { 0x6867, &vega10_device_info },        /* Vega10 */
        { 0x6868, &vega10_device_info },        /* Vega10 */
+       { 0x6869, &vega10_device_info },        /* Vega10 */
+       { 0x686A, &vega10_device_info },        /* Vega10 */
+       { 0x686B, &vega10_device_info },        /* Vega10 */
        { 0x686C, &vega10_vf_device_info },     /* Vega10  vf*/
+       { 0x686D, &vega10_device_info },        /* Vega10 */
+       { 0x686E, &vega10_device_info },        /* Vega10 */
+       { 0x686F, &vega10_device_info },        /* Vega10 */
        { 0x687F, &vega10_device_info },        /* Vega10 */
        { 0x66a0, &vega20_device_info },        /* Vega20 */
        { 0x66a1, &vega20_device_info },        /* Vega20 */
        { 0x66a2, &vega20_device_info },        /* Vega20 */
        { 0x66a3, &vega20_device_info },        /* Vega20 */
+       { 0x66a4, &vega20_device_info },        /* Vega20 */
        { 0x66a7, &vega20_device_info },        /* Vega20 */
        { 0x66af, &vega20_device_info }         /* Vega20 */
 };
index 3367dd30cdd0d1c8c8482afb436885383a736103..3b7fce5d7258eb71f2b8fbfa01c82c7143124ae9 100644 (file)
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.disable_auto_wattman = 1;
        data->registry_data.auto_wattman_debug = 0;
        data->registry_data.auto_wattman_sample_period = 100;
-       data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+       data->registry_data.fclk_gfxclk_ratio = 0;
        data->registry_data.auto_wattman_threshold = 50;
        data->registry_data.gfxoff_controlled_by_driver = 1;
        data->gfxoff_allowed = false;
index 62f36ba2435be4770e9a14f9dd8a779a2802fa79..c1a99dfe4913f247d20ce8cafab4dd5e0499715f 100644 (file)
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_AgmResetPsm                 ((uint16_t) 0x403)
 #define PPSMC_MSG_ReadVftCell                 ((uint16_t) 0x404)
 
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage      ((uint16_t) 0x415)
+
 #define PPSMC_MSG_GFX_CU_PG_ENABLE            ((uint16_t) 0x280)
 #define PPSMC_MSG_GFX_CU_PG_DISABLE           ((uint16_t) 0x281)
 #define PPSMC_MSG_GetCurrPkgPwr               ((uint16_t) 0x282)
index 872d3824337bf90ad8abbf4cce1eb9d77630c48a..a1e0ac9ae2482a776416aa987e0787e65dd3fe9a 100644 (file)
@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
 
        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
 
+       /* Apply avfs cks-off voltages to avoid the overshoot
+        * when switching to the highest sclk frequency
+        */
+       if (data->apply_avfs_cks_off_voltage)
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
        return 0;
 }
 
index 99d5e4f98f49cd7ec103a70eee060b8c9e0241e4..a6edd5df33b0fa0cf9b4b3ed8dd694ba9898b14b 100644 (file)
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
 MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
index 481896fb712abf4c178b28af2f224a369d0aefd9..85e6736f0a327742329dc9ee3bc8cf2dce294ed3 100644 (file)
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                plane->bpp = skl_pixel_formats[fmt].bpp;
                plane->drm_format = skl_pixel_formats[fmt].drm_format;
        } else {
-               plane->tiled = !!(val & DISPPLANE_TILED);
+               plane->tiled = val & DISPPLANE_TILED;
                fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
                plane->bpp = bdw_pixel_formats[fmt].bpp;
                plane->drm_format = bdw_pixel_formats[fmt].drm_format;
index ffdbbac4400eaf7d86390a3ff105a18ef36645ea..47062ee979cfb2d38b6078455562c64ef530ec8a 100644 (file)
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_uncore_sanitize(dev_priv);
 
+       intel_gt_init_workarounds(dev_priv);
        i915_gem_load_init_fences(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
index 9102571e9692d1540ad987ed31c4ec735dd80cf5..872a2e159a5f903fc6a9d84d6392cc04a44e6d87 100644 (file)
@@ -67,6 +67,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_uncore.h"
 #include "intel_wopcm.h"
+#include "intel_workarounds.h"
 #include "intel_uc.h"
 
 #include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
        struct i915_workarounds workarounds;
+       struct i915_wa_list gt_wa_list;
 
        struct i915_frontbuffer_tracking fb_tracking;
 
@@ -2148,6 +2150,8 @@ struct drm_i915_private {
                struct delayed_work idle_work;
 
                ktime_t last_init_time;
+
+               struct i915_vma *scratch;
        } gt;
 
        /* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
                return I915_HWS_CSB_WRITE_INDEX;
 }
 
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+       return i915_ggtt_offset(i915->gt.scratch);
+}
+
 #endif
index 0c8aa57ce83b4033723ded1540986b628d92d772..6ae9a6080cc8838b45dc11fb6de28f63fbe09b04 100644 (file)
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
                }
        }
 
-       intel_gt_workarounds_apply(dev_priv);
+       intel_gt_apply_workarounds(dev_priv);
 
        i915_gem_init_swizzling(dev_priv);
 
@@ -5500,6 +5500,44 @@ err_active:
        goto out_ctx;
 }
 
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
+
+       obj = i915_gem_object_create_stolen(i915, size);
+       if (!obj)
+               obj = i915_gem_object_create_internal(i915, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               return PTR_ERR(obj);
+       }
+
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unref;
+       }
+
+       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+       if (ret)
+               goto err_unref;
+
+       i915->gt.scratch = vma;
+       return 0;
+
+err_unref:
+       i915_gem_object_put(obj);
+       return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+       i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                goto err_unlock;
        }
 
-       ret = i915_gem_contexts_init(dev_priv);
+       ret = i915_gem_init_scratch(dev_priv,
+                                   IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_ggtt;
        }
 
+       ret = i915_gem_contexts_init(dev_priv);
+       if (ret) {
+               GEM_BUG_ON(ret == -EIO);
+               goto err_scratch;
+       }
+
        ret = intel_engines_init(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
 err_context:
        if (ret != -EIO)
                i915_gem_contexts_fini(dev_priv);
+err_scratch:
+       i915_gem_fini_scratch(dev_priv);
 err_ggtt:
 err_unlock:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
        intel_uc_fini(dev_priv);
        i915_gem_cleanup_engines(dev_priv);
        i915_gem_contexts_fini(dev_priv);
+       i915_gem_fini_scratch(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       intel_wa_list_free(&dev_priv->gt_wa_list);
+
        intel_cleanup_gt_powersave(dev_priv);
 
        intel_uc_fini_misc(dev_priv);
index d4fac09095f862aed3131243957059de2df4f6b0..1aaccbe7e1debd0c11440ac9acae9c15b07880d5 100644 (file)
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
                else if (gen >= 4)
                        len = 4;
                else
-                       len = 6;
+                       len = 3;
 
                batch = reloc_gpu(eb, vma, len);
                if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
                        *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *batch++ = addr;
                        *batch++ = target_offset;
-
-                       /* And again for good measure (blb/pnv) */
-                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-                       *batch++ = addr;
-                       *batch++ = target_offset;
                }
 
                goto out;
index 3eb33e000d6f00f3ae4b3fdbc8f37f38e97b4d83..db4128d6c09b6735a02efb76ea7cf756459eaa51 100644 (file)
@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
                        if (HAS_BROKEN_CS_TLB(i915))
                                ee->wa_batchbuffer =
                                        i915_error_object_create(i915,
-                                                                engine->scratch);
+                                                                i915->gt.scratch);
                        request_record_user_bo(request, ee);
 
                        ee->ctx =
index 217ed3ee1cab4e808f1dd8f4e8b60dca36ea65da..76b5f94ea6cb62a38c42bec9d4da03c638489480 100644 (file)
@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-                               unsigned int size)
-{
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-       int ret;
-
-       WARN_ON(engine->scratch);
-
-       obj = i915_gem_object_create_stolen(engine->i915, size);
-       if (!obj)
-               obj = i915_gem_object_create_internal(engine->i915, size);
-       if (IS_ERR(obj)) {
-               DRM_ERROR("Failed to allocate scratch page\n");
-               return PTR_ERR(obj);
-       }
-
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto err_unref;
-       }
-
-       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-       if (ret)
-               goto err_unref;
-
-       engine->scratch = vma;
-       return 0;
-
-err_unref:
-       i915_gem_object_put(obj);
-       return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
-       i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
 static void cleanup_status_page(struct intel_engine_cs *engine)
 {
        if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       intel_engine_cleanup_scratch(engine);
-
        cleanup_status_page(engine);
 
        intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        __intel_context_unpin(i915->kernel_context, engine);
 
        i915_timeline_fini(&engine->timeline);
+
+       intel_wa_list_free(&engine->wa_list);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
index 37c94a54efcbb2501509b5a838e61d983985d8b2..58d1d3d47dd31ed7aa150f6b9865eb05e0a170b7 100644 (file)
@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
         * may not be visible to the HW prior to the completion of the UC
         * register write and that we may begin execution from the context
         * before its image is complete leading to invalid PD chasing.
+        *
+        * Furthermore, Braswell, at least, wants a full mb to be sure that
+        * the writes are coherent in memory (visible to the GPU) prior to
+        * execution, and not just visible to other CPUs (as is the result of
+        * wmb).
         */
-       wmb();
+       mb();
        return ce->lrc_desc;
 }
 
@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
 static u32 *
 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 {
+       /* NB no one else is allowed to scribble over scratch + 256! */
        *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+       *batch++ = i915_scratch_offset(engine->i915) + 256;
        *batch++ = 0;
 
        *batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
        *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+       *batch++ = i915_scratch_offset(engine->i915) + 256;
        *batch++ = 0;
 
        return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                       PIPE_CONTROL_GLOBAL_GTT_IVB |
                                       PIPE_CONTROL_CS_STALL |
                                       PIPE_CONTROL_QW_WRITE,
-                                      i915_ggtt_offset(engine->scratch) +
+                                      i915_scratch_offset(engine->i915) +
                                       2 * CACHELINE_BYTES);
 
        *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                               PIPE_CONTROL_GLOBAL_GTT_IVB |
                                               PIPE_CONTROL_CS_STALL |
                                               PIPE_CONTROL_QW_WRITE,
-                                              i915_ggtt_offset(engine->scratch)
+                                              i915_scratch_offset(engine->i915)
                                               + 2 * CACHELINE_BYTES);
        }
 
@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
+       intel_engine_apply_workarounds(engine);
+
        intel_mocs_init_engine(engine);
 
        intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
        struct intel_engine_cs *engine = request->engine;
        u32 scratch_addr =
-               i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+               i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false, dc_flush_wa = false;
        u32 *cs, flags = 0;
        int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-       if (ret)
-               goto err_cleanup_common;
-
        ret = intel_init_workaround_bb(engine);
        if (ret) {
                /*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
                          ret);
        }
 
-       return 0;
+       intel_engine_init_workarounds(engine);
 
-err_cleanup_common:
-       intel_engine_cleanup_common(engine);
-       return ret;
+       return 0;
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
index 187bb0ceb4ac4324b3c12ab72635d4a776b2129c..1f8d2a66c791fee7a4e279942324015d6add1371 100644 (file)
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
 {
+       unsigned int num_store_dw;
        u32 cmd, *cs;
 
        cmd = MI_FLUSH;
-
+       num_store_dw = 0;
        if (mode & EMIT_INVALIDATE)
                cmd |= MI_READ_FLUSH;
+       if (mode & EMIT_FLUSH)
+               num_store_dw = 4;
 
-       cs = intel_ring_begin(rq, 2);
+       cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
-       *cs++ = MI_NOOP;
+       while (num_store_dw--) {
+               *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+               *cs++ = i915_scratch_offset(rq->i915);
+               *cs++ = 0;
+       }
+       *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
        intel_ring_advance(rq, cs);
 
        return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
         */
        if (mode & EMIT_INVALIDATE) {
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
-                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
 
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
                        *cs++ = MI_FLUSH;
 
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
-                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
        }
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs;
 
        cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
        int ret;
 
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr =
-               i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
 
        /*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
 }
 
 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
-       u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+       u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+       GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
 
        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
        struct i915_timeline *timeline;
        struct intel_ring *ring;
-       unsigned int size;
        int err;
 
        intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        GEM_BUG_ON(engine->buffer);
        engine->buffer = ring;
 
-       size = PAGE_SIZE;
-       if (HAS_BROKEN_CS_TLB(engine->i915))
-               size = I830_WA_SIZE;
-       err = intel_engine_create_scratch(engine, size);
-       if (err)
-               goto err_unpin;
-
        err = intel_engine_init_common(engine);
        if (err)
-               goto err_scratch;
+               goto err_unpin;
 
        return 0;
 
-err_scratch:
-       intel_engine_cleanup_scratch(engine);
 err_unpin:
        intel_ring_unpin(ring);
 err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
        /* Stall until the page table load is complete */
        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = i915_ggtt_offset(engine->scratch);
+       *cs++ = i915_scratch_offset(rq->i915);
        *cs++ = MI_NOOP;
 
        intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                        /* Insert a delay before the next switch! */
                        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
                        *cs++ = i915_mmio_reg_offset(last_reg);
-                       *cs++ = i915_ggtt_offset(engine->scratch);
+                       *cs++ = i915_scratch_offset(rq->i915);
                        *cs++ = MI_NOOP;
                }
                *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
index 2dfa585712c28ac4196830a30dafc4448b3de604..767a7192c969751da0fce60a0874523049bc1de2 100644 (file)
@@ -15,6 +15,7 @@
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 #include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
 
 struct drm_printer;
 struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
 
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
-       struct i915_vma *scratch;
+       struct i915_wa_list wa_list;
 
        u32             irq_keep_mask; /* always keep these interrupts */
        u32             irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
-                               unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
index 4bcdeaf8d98fa3de5aec7790971098905b5a688b..6e580891db96fa9350bfa2a2f9a68be50703229b 100644 (file)
  * - Public functions to init or apply the given workaround type.
  */
 
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+       wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+       if (!wal->count)
+               return;
+
+       DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+                        wal->count, wal->name);
+}
+
 static void wa_add(struct drm_i915_private *i915,
                   i915_reg_t reg, const u32 mask, const u32 val)
 {
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
        return 0;
 }
 
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+       const unsigned int grow = 1 << 4;
+
+       GEM_BUG_ON(!is_power_of_2(grow));
+
+       if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+               struct i915_wa *list;
+
+               list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+                                    GFP_KERNEL);
+               if (!list) {
+                       DRM_ERROR("No space for workaround init!\n");
+                       return;
+               }
+
+               if (wal->list)
+                       memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+               wal->list = list;
+       }
+
+       wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+       struct i915_wa wa = {
+               .reg = reg,
+               .mask = val,
+               .val = _MASKED_BIT_ENABLE(val)
+       };
+
+       wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+                  u32 val)
 {
+       struct i915_wa wa = {
+               .reg = reg,
+               .mask = mask,
+               .val = val
+       };
+
+       wal_add(wal, &wa);
 }
 
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
+       wa_write_masked_or(wal, reg, ~0, val);
 }
 
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-       /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
-       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
-                  _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+       wa_write_masked_or(wal, reg, val, val);
+}
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
-       I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-                  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
        /* WaDisableKillLogic:bxt,skl,kbl */
-       if (!IS_COFFEELAKE(dev_priv))
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          ECOCHK_DIS_TLB);
+       if (!IS_COFFEELAKE(i915))
+               wa_write_or(wal,
+                           GAM_ECOCHK,
+                           ECOCHK_DIS_TLB);
 
-       if (HAS_LLC(dev_priv)) {
+       if (HAS_LLC(i915)) {
                /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
                 *
                 * Must match Display Engine. See
                 * WaCompressedResourceDisplayNewHashMode.
                 */
-               I915_WRITE(MMCD_MISC_CTRL,
-                          I915_READ(MMCD_MISC_CTRL) |
-                          MMCD_PCLA |
-                          MMCD_HOTSPOT_EN);
+               wa_write_or(wal,
+                           MMCD_MISC_CTRL,
+                           MMCD_PCLA | MMCD_HOTSPOT_EN);
        }
 
        /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
-       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                  BDW_DISABLE_HDC_INVALIDATION);
-
-       /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
-       if (IS_GEN9_LP(dev_priv)) {
-               u32 val = I915_READ(GEN8_L3SQCREG1);
-
-               val &= ~L3_PRIO_CREDITS_MASK;
-               val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-               I915_WRITE(GEN8_L3SQCREG1, val);
-       }
-
-       /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
-       I915_WRITE(GEN8_L3SQCREG4,
-                  I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       wa_write_or(wal,
+                   GAM_ECOCHK,
+                   BDW_DISABLE_HDC_INVALIDATION);
 }
 
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:skl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableGafsUnitClkGating:skl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:skl */
-       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
-               I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                          I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+               wa_write_or(wal,
+                           GEN9_GAMT_ECO_REG_RW_IA,
+                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaDisablePooledEuLoadBalancingFix:bxt */
-       I915_WRITE(FF_SLICE_CS_CHICKEN2,
-                  _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+       gen9_gt_workarounds_init(i915);
 
        /* WaInPlaceDecompressionHang:bxt */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:kbl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableDynamicCreditSharing:kbl */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
-               I915_WRITE(GAMT_CHKN_BIT_REG,
-                          I915_READ(GAMT_CHKN_BIT_REG) |
-                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+       if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+               wa_write_or(wal,
+                           GAMT_CHKN_BIT_REG,
+                           GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
 
        /* WaDisableGafsUnitClkGating:kbl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:kbl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaKBLVECSSemaphoreWaitPoll:kbl */
-       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
-               struct intel_engine_cs *engine;
-               unsigned int tmp;
-
-               for_each_engine(engine, dev_priv, tmp) {
-                       if (engine->id == RCS)
-                               continue;
-
-                       I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
-               }
-       }
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       gen9_gt_workarounds_init(i915);
 }
 
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       gen9_gt_workarounds_apply(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* WaEnableGapsTsvCreditFix:cfl */
-       I915_WRITE(GEN8_GARBCNTL,
-                  I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+       gen9_gt_workarounds_init(i915);
 
        /* WaDisableGafsUnitClkGating:cfl */
-       I915_WRITE(GEN7_UCGCTL4,
-                  I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+       wa_write_or(wal,
+                   GEN7_UCGCTL4,
+                   GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:cfl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
 static void wa_init_mcr(struct drm_i915_private *dev_priv)
 {
        const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
-       u32 mcr;
+       struct i915_wa_list *wal = &dev_priv->gt_wa_list;
        u32 mcr_slice_subslice_mask;
 
        /*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
                WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
        }
 
-       mcr = I915_READ(GEN8_MCR_SELECTOR);
-
        if (INTEL_GEN(dev_priv) >= 11)
                mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
                                          GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
         * occasions, such as INSTDONE, where this value is dependent
         * on s/ss combo, the read should be done with read_subslice_reg.
         */
-       mcr &= ~mcr_slice_subslice_mask;
-       mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
-       I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+       wa_write_masked_or(wal,
+                          GEN8_MCR_SELECTOR,
+                          mcr_slice_subslice_mask,
+                          intel_calculate_mcr_s_ss_select(dev_priv));
 }
 
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       wa_init_mcr(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
+
+       wa_init_mcr(i915);
 
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-       if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
-               I915_WRITE(GAMT_CHKN_BIT_REG,
-                          I915_READ(GAMT_CHKN_BIT_REG) |
-                          GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+       if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+               wa_write_or(wal,
+                           GAMT_CHKN_BIT_REG,
+                           GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
 
        /* WaInPlaceDecompressionHang:cnl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                  I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaEnablePreemptionGranularityControlByUMD:cnl */
-       I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
 {
-       wa_init_mcr(dev_priv);
+       struct i915_wa_list *wal = &i915->gt_wa_list;
 
-       /* This is not an Wa. Enable for better image quality */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+       wa_init_mcr(i915);
 
        /* WaInPlaceDecompressionHang:icl */
-       I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
-       /* WaPipelineFlushCoherentLines:icl */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN8_LQSC_FLUSH_COHERENT_LINES);
-
-       /* Wa_1405543622:icl
-        * Formerly known as WaGAPZPriorityScheme
-        */
-       I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
-                                 GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
-       /* Wa_1604223664:icl
-        * Formerly known as WaL3BankAddressHashing
-        */
-       I915_WRITE(GEN8_GARBCNTL,
-                  (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
-                  GEN11_HASH_CTRL_EXCL_BIT0);
-       I915_WRITE(GEN11_GLBLINVL,
-                  (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
-                  GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+       wa_write_or(wal,
+                   GEN9_GAMT_ECO_REG_RW_IA,
+                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 
        /* WaModifyGamTlbPartitioning:icl */
-       I915_WRITE(GEN11_GACB_PERF_CTRL,
-                  (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
-                  GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
-       /* Wa_1405733216:icl
-        * Formerly known as WaDisableCleanEvicts
-        */
-       I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                                  GEN11_LQSC_CLEAN_EVICT_DISABLE);
+       wa_write_masked_or(wal,
+                          GEN11_GACB_PERF_CTRL,
+                          GEN11_HASH_CTRL_MASK,
+                          GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
 
        /* Wa_1405766107:icl
         * Formerly known as WaCL2SFHalfMaxAlloc
         */
-       I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
-                                     GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+       wa_write_or(wal,
+                   GEN11_LSN_UNSLCVC,
+                   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+                   GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
 
        /* Wa_220166154:icl
         * Formerly known as WaDisCtxReload
         */
-       I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
-                                          GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+       wa_write_or(wal,
+                   GEN8_GAMW_ECO_DEV_RW_IA,
+                   GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
        /* Wa_1405779004:icl (pre-prod) */
-       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
-               I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-                          I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-                          MSCUNIT_CLKGATE_DIS);
+       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+               wa_write_or(wal,
+                           SLICE_UNIT_LEVEL_CLKGATE,
+                           MSCUNIT_CLKGATE_DIS);
 
        /* Wa_1406680159:icl */
-       I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
-                  I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
-                  GWUNIT_CLKGATE_DIS);
-
-       /* Wa_1604302699:icl */
-       I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
-                  I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
-                  GEN11_I2M_WRITE_DISABLE);
+       wa_write_or(wal,
+                   SUBSLICE_UNIT_LEVEL_CLKGATE,
+                   GWUNIT_CLKGATE_DIS);
 
        /* Wa_1406838659:icl (pre-prod) */
-       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
-               I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
-                          I915_READ(INF_UNIT_LEVEL_CLKGATE) |
-                          CGPSF_CLKGATE_DIS);
-
-       /* WaForwardProgressSoftReset:icl */
-       I915_WRITE(GEN10_SCRATCH_LNCF2,
-                  I915_READ(GEN10_SCRATCH_LNCF2) |
-                  PMFLUSHDONE_LNICRSDROP |
-                  PMFLUSH_GAPL3UNBLOCK |
-                  PMFLUSHDONE_LNEBLK);
+       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+               wa_write_or(wal,
+                           INF_UNIT_LEVEL_CLKGATE,
+                           CGPSF_CLKGATE_DIS);
 
        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
-       I915_WRITE(GAMT_CHKN_BIT_REG,
-                  I915_READ(GAMT_CHKN_BIT_REG) |
-                  GAMT_CHKN_DISABLE_L3_COH_PIPE);
+       wa_write_or(wal,
+                   GAMT_CHKN_BIT_REG,
+                   GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
 {
-       if (INTEL_GEN(dev_priv) < 8)
+       struct i915_wa_list *wal = &i915->gt_wa_list;
+
+       wa_init_start(wal, "GT");
+
+       if (INTEL_GEN(i915) < 8)
                return;
-       else if (IS_BROADWELL(dev_priv))
-               bdw_gt_workarounds_apply(dev_priv);
-       else if (IS_CHERRYVIEW(dev_priv))
-               chv_gt_workarounds_apply(dev_priv);
-       else if (IS_SKYLAKE(dev_priv))
-               skl_gt_workarounds_apply(dev_priv);
-       else if (IS_BROXTON(dev_priv))
-               bxt_gt_workarounds_apply(dev_priv);
-       else if (IS_KABYLAKE(dev_priv))
-               kbl_gt_workarounds_apply(dev_priv);
-       else if (IS_GEMINILAKE(dev_priv))
-               glk_gt_workarounds_apply(dev_priv);
-       else if (IS_COFFEELAKE(dev_priv))
-               cfl_gt_workarounds_apply(dev_priv);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_gt_workarounds_apply(dev_priv);
-       else if (IS_ICELAKE(dev_priv))
-               icl_gt_workarounds_apply(dev_priv);
+       else if (IS_BROADWELL(i915))
+               return;
+       else if (IS_CHERRYVIEW(i915))
+               return;
+       else if (IS_SKYLAKE(i915))
+               skl_gt_workarounds_init(i915);
+       else if (IS_BROXTON(i915))
+               bxt_gt_workarounds_init(i915);
+       else if (IS_KABYLAKE(i915))
+               kbl_gt_workarounds_init(i915);
+       else if (IS_GEMINILAKE(i915))
+               glk_gt_workarounds_init(i915);
+       else if (IS_COFFEELAKE(i915))
+               cfl_gt_workarounds_init(i915);
+       else if (IS_CANNONLAKE(i915))
+               cnl_gt_workarounds_init(i915);
+       else if (IS_ICELAKE(i915))
+               icl_gt_workarounds_init(i915);
        else
-               MISSING_CASE(INTEL_GEN(dev_priv));
+               MISSING_CASE(INTEL_GEN(i915));
+
+       wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+                  const struct i915_wa_list *wal)
+{
+       enum forcewake_domains fw = 0;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+               fw |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    wa->reg,
+                                                    FW_REG_READ |
+                                                    FW_REG_WRITE);
+
+       return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+       enum forcewake_domains fw;
+       unsigned long flags;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       if (!wal->count)
+               return;
+
+       fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+       spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+       intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+               u32 val = I915_READ_FW(wa->reg);
+
+               val &= ~wa->mask;
+               val |= wa->val;
+
+               I915_WRITE_FW(wa->reg, val);
+       }
+
+       intel_uncore_forcewake_put__locked(dev_priv, fw);
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+       DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+       wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
 }
 
 struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
        whitelist_apply(engine, whitelist_build(engine, &w));
 }
 
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       if (IS_ICELAKE(i915)) {
+               /* This is not an Wa. Enable for better image quality */
+               wa_masked_en(wal,
+                            _3D_CHICKEN3,
+                            _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+               /* WaPipelineFlushCoherentLines:icl */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+               /*
+                * Wa_1405543622:icl
+                * Formerly known as WaGAPZPriorityScheme
+                */
+               wa_write_or(wal,
+                           GEN8_GARBCNTL,
+                           GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+               /*
+                * Wa_1604223664:icl
+                * Formerly known as WaL3BankAddressHashing
+                */
+               wa_write_masked_or(wal,
+                                  GEN8_GARBCNTL,
+                                  GEN11_HASH_CTRL_EXCL_MASK,
+                                  GEN11_HASH_CTRL_EXCL_BIT0);
+               wa_write_masked_or(wal,
+                                  GEN11_GLBLINVL,
+                                  GEN11_BANK_HASH_ADDR_EXCL_MASK,
+                                  GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+               /*
+                * Wa_1405733216:icl
+                * Formerly known as WaDisableCleanEvicts
+                */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+               /* Wa_1604302699:icl */
+               wa_write_or(wal,
+                           GEN10_L3_CHICKEN_MODE_REGISTER,
+                           GEN11_I2M_WRITE_DISABLE);
+
+               /* WaForwardProgressSoftReset:icl */
+               wa_write_or(wal,
+                           GEN10_SCRATCH_LNCF2,
+                           PMFLUSHDONE_LNICRSDROP |
+                           PMFLUSH_GAPL3UNBLOCK |
+                           PMFLUSHDONE_LNEBLK);
+       }
+
+       if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+               /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+               wa_masked_en(wal,
+                            GEN7_FF_SLICE_CS_CHICKEN1,
+                            GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+       }
+
+       if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+               /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+               wa_write_or(wal,
+                           GEN8_GARBCNTL,
+                           GEN9_GAPS_TSV_CREDIT_DISABLE);
+       }
+
+       if (IS_BROXTON(i915)) {
+               /* WaDisablePooledEuLoadBalancingFix:bxt */
+               wa_masked_en(wal,
+                            FF_SLICE_CS_CHICKEN2,
+                            GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
+       if (IS_GEN9(i915)) {
+               /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+               wa_masked_en(wal,
+                            GEN9_CSFE_CHICKEN1_RCS,
+                            GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+               /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+               wa_write_or(wal,
+                           BDW_SCRATCH1,
+                           GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+               /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+               if (IS_GEN9_LP(i915))
+                       wa_write_masked_or(wal,
+                                          GEN8_L3SQCREG1,
+                                          L3_PRIO_CREDITS_MASK,
+                                          L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+               /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+               wa_write_or(wal,
+                           GEN8_L3SQCREG4,
+                           GEN8_LQSC_FLUSH_COHERENT_LINES);
+       }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *i915 = engine->i915;
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       /* WaKBLVECSSemaphoreWaitPoll:kbl */
+       if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+               wa_write(wal,
+                        RING_SEMA_WAIT_POLL(engine->mmio_base),
+                        1);
+       }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct i915_wa_list *wal = &engine->wa_list;
+
+       if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+               return;
+
+       wa_init_start(wal, engine->name);
+
+       if (engine->id == RCS)
+               rcs_engine_wa_init(engine);
+       else
+               xcs_engine_wa_init(engine);
+
+       wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+       wa_list_apply(engine->i915, &engine->wa_list);
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/intel_workarounds.c"
 #endif
index b11d0623e62686f4e2b6f1fdbaddb476dba3231e..979695a539648cee1fa4b7e870a9d12a6c4d5892 100644 (file)
@@ -7,11 +7,35 @@
 #ifndef _I915_WORKAROUNDS_H_
 #define _I915_WORKAROUNDS_H_
 
+#include <linux/slab.h>
+
+struct i915_wa {
+       i915_reg_t        reg;
+       u32               mask;
+       u32               val;
+};
+
+struct i915_wa_list {
+       const char      *name;
+       struct i915_wa  *list;
+       unsigned int    count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+       kfree(wal->list);
+       memset(wal, 0, sizeof(*wal));
+}
+
 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
 int intel_ctx_workarounds_emit(struct i915_request *rq);
 
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
 
 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
 
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
 #endif
index 66df1b1779592195e38fe0437874c39241a2f297..27b507eb4a997d5dee2809d454341cb7a79e2731 100644 (file)
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
        dsi->encoder.possible_crtcs = 1;
 
        /* If there's a bridge, attach to it and let it create the connector */
-       ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
-       if (ret) {
-               DRM_ERROR("Failed to attach bridge to drm\n");
-
+       if (dsi->bridge) {
+               ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+               if (ret) {
+                       DRM_ERROR("Failed to attach bridge to drm\n");
+                       goto err_encoder_cleanup;
+               }
+       } else {
                /* Otherwise create our own connector and attach to a panel */
                ret = mtk_dsi_create_connector(drm, dsi);
                if (ret)
index 6cbbae3f438bd0e44cbc01406687ed82170b7372..db1bf7f88c1f5aa8b2c57e44971cc1e33201443a 100644 (file)
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 /******************************************************************************
  * EVO channel helpers
  *****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+       /* Push buffer fetches are not coherent with BAR1, we need to ensure
+        * writes have been flushed right through to VRAM before writing PUT.
+        */
+       if (dmac->push.type & NVIF_MEM_VRAM) {
+               struct nvif_device *device = dmac->base.device;
+               nvif_wr32(&device->object, 0x070000, 0x00000001);
+               nvif_msec(device, 2000,
+                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+                               break;
+               );
+       }
+}
+
 u32 *
 evo_wait(struct nv50_dmac *evoc, int nr)
 {
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
        mutex_lock(&dmac->lock);
        if (put + nr >= (PAGE_SIZE / 4) - 8) {
                dmac->ptr[put] = 0x20000000;
+               evo_flush(dmac);
 
                nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
                if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
        struct nv50_dmac *dmac = evoc;
 
-       /* Push buffer fetches are not coherent with BAR1, we need to ensure
-        * writes have been flushed right through to VRAM before writing PUT.
-        */
-       if (dmac->push.type & NVIF_MEM_VRAM) {
-               struct nvif_device *device = dmac->base.device;
-               nvif_wr32(&device->object, 0x070000, 0x00000001);
-               nvif_msec(device, 2000,
-                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
-                               break;
-               );
-       }
+       evo_flush(dmac);
 
        nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
        mutex_unlock(&dmac->lock);
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
 {
        struct nv50_mstm *mstm = *pmstm;
        if (mstm) {
+               drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
                kfree(*pmstm);
                *pmstm = NULL;
        }
index 2b2baf6e0e0d6bbde2aadefd0a16e55aaadf3736..d2928d43f29a43d772729ef68036b426f861047d 100644 (file)
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
                goto err_free;
        }
 
+       err = nouveau_drm_device_init(drm);
+       if (err)
+               goto err_put;
+
        platform_set_drvdata(pdev, drm);
 
        return drm;
 
+err_put:
+       drm_dev_put(drm);
 err_free:
        nvkm_device_del(pdevice);
 
index 941f35233b1fe01ae1ae36061dd3747ce89f23d4..5864cb452c5c7710b6c38950f715125567217097 100644 (file)
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
        return 0;
 }
 
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
-       rockchip_drm_platform_remove(pdev);
-}
-
 static const struct of_device_id rockchip_drm_dt_ids[] = {
        { .compatible = "rockchip,display-subsystem", },
        { /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
        .remove = rockchip_drm_platform_remove,
-       .shutdown = rockchip_drm_platform_shutdown,
        .driver = {
                .name = "rockchip-drm",
                .of_match_table = rockchip_drm_dt_ids,
index 61a84b958d671671cb56c37b6e44a589989d026b..d7a2dfb8ee9b1101ed99a6ca6fe28e7000a8391c 100644 (file)
@@ -49,6 +49,8 @@
 
 #define VMWGFX_REPO "In Tree"
 
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 
 /**
  * Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                spin_unlock(&dev_priv->cap_lock);
        }
 
-
+       vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
index 59f614225bcd72b8a84a5de42813359e9a2760d0..aca974b14b550b11d02e7d3d83ac77fc60e4f439 100644 (file)
@@ -606,6 +606,9 @@ struct vmw_private {
 
        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+       /* Validation memory reservation */
+       struct vmw_validation_mem vvm;
 };
 
 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+                                       size_t gran);
 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
index 5a6b70ba137aa321fe606946640464d861338087..f2d13a72c05d3ef1785422b8b5285536453aecee 100644 (file)
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      void *buf)
 {
        struct vmw_buffer_object *vmw_bo;
-       int ret;
 
        struct {
                uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return vmw_translate_guest_ptr(dev_priv, sw_context,
                                       &cmd->body.ptr,
                                       &vmw_bo);
-       return ret;
 }
 
 
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        struct sync_file *sync_file = NULL;
        DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
+       vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
index 7b1e5a5cbd2c7758aadf70f784f4ce9c629ca1db..f882470467218bef553223b72ab1e0420523a514 100644 (file)
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
        drm_global_item_unref(&dev_priv->bo_global_ref.ref);
        drm_global_item_unref(&dev_priv->mem_global_ref);
 }
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+       static struct ttm_operation_ctx ctx = {.interruptible = false,
+                                              .no_wait_gpu = false};
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+       return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure assumption in the
+ * callbacks that struct vmw_private derives from struct vmw_validation_mem
+ * holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+       struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+       vvm->reserve_mem = vmw_vmt_reserve;
+       vvm->unreserve_mem = vmw_vmt_unreserve;
+       vvm->gran = gran;
+}
index 184025fa938e78fd0372d255b1db552bdb95a88d..f116f092e00bcbf33a4c73a12f504a2580435e6c 100644 (file)
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
                return NULL;
 
        if (ctx->mem_size_left < size) {
-               struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               struct page *page;
 
+               if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+                       int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+                       if (ret)
+                               return NULL;
+
+                       ctx->vm_size_left += ctx->vm->gran;
+                       ctx->total_mem += ctx->vm->gran;
+               }
+
+               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        return NULL;
 
+               if (ctx->vm)
+                       ctx->vm_size_left -= PAGE_SIZE;
+
                list_add_tail(&page->lru, &ctx->page_list);
                ctx->page_address = page_address(page);
                ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
        }
 
        ctx->mem_size_left = 0;
+       if (ctx->vm && ctx->total_mem) {
+               ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+               ctx->total_mem = 0;
+               ctx->vm_size_left = 0;
+       }
 }
 
 /**
index b57e3292c386b28067a924a33f94dd614ad43190..3b396fea40d7182c6621e40045c77fa0ff50ebab 100644 (file)
 #include <linux/ww_mutex.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
+/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+       int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+       void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+       size_t gran;
+};
+
 /**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
  * buffer objects
  * @mem_size_left: Free memory left in the last page in @page_list
  * @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
  */
 struct vmw_validation_context {
        struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
        unsigned int merge_dups;
        unsigned int mem_size_left;
        u8 *page_address;
+       struct vmw_validation_mem *vm;
+       size_t vm_size_left;
+       size_t total_mem;
 };
 
 struct vmw_buffer_object;
@@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
        return !list_empty(&ctx->bo_list);
 }
 
+/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+                          struct vmw_validation_mem *vm)
+{
+       ctx->vm = vm;
+}
+
 /**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context
index ed35c9a9a11090a7d4714e2cdfa83a5ca6761b6c..27519eb8ee636f8823d71a3ccf6802c8620e3745 100644 (file)
@@ -17,6 +17,9 @@
 #ifndef HID_IDS_H_FILE
 #define HID_IDS_H_FILE
 
+#define USB_VENDOR_ID_258A             0x258a
+#define USB_DEVICE_ID_258A_6A88                0x6a88
+
 #define USB_VENDOR_ID_3M               0x0596
 #define USB_DEVICE_ID_3M1968           0x0500
 #define USB_DEVICE_ID_3M2256           0x0502
 #define USB_VENDOR_ID_REALTEK          0x0bda
 #define USB_DEVICE_ID_REALTEK_READER   0x0152
 
+#define USB_VENDOR_ID_RETROUSB         0xf000
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD   0x0003
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT  0x00f1
+
 #define USB_VENDOR_ID_ROCCAT           0x1e7d
 #define USB_DEVICE_ID_ROCCAT_ARVO      0x30d4
 #define USB_DEVICE_ID_ROCCAT_ISKU      0x319c
index 1882a4ab0f29f48c8bed3dffa3cecc61a22d1661..98b059d79bc891948695fe53a6a774bf171376c8 100644 (file)
@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
 
 static const struct hid_device_id ite_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
index c85a79986b6afc4f7b4e164e4f95d2b71c949024..94088c0ed68ab5758cbc43b1fdb4485358d0c0a2 100644 (file)
@@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
index 97954f575c3f691df222aa7102f01b5d296ff893..1c1a2514d6f31b8a2e8ce04293c949922b4e92ee 100644 (file)
@@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support"
 
 config HYPERV
        tristate "Microsoft Hyper-V client drivers"
-       depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+       depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
        select PARAVIRT
        help
          Select this option to run Linux as a Hyper-V client operating
index 283d184280aff10470d30c1314f4b349f3bcba68..d0ff65675292bd8b9ac7c6fa99d708e94484a62b 100644 (file)
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
 }
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_read_index);
 }
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.current_write_index);
 }
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
 }
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
        return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
 }
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
 }
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_read_index);
 }
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.current_write_index);
 }
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
 }
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
+       if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
        hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
        return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
 }
index 25d43c8f1c2a869ffc10548f1d91da7049669b66..558de0b9895cb979745b5002a623568bb5b21f7b 100644 (file)
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
        struct net_device *cookie_ndev = cookie;
        bool match = false;
 
+       if (!rdma_ndev)
+               return false;
+
        rcu_read_lock();
        if (netif_is_bond_master(cookie_ndev) &&
            rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
index 9b20479dc71075c7dd4dad3c82945cbdea7f65e2..7e6d70936c63c8142a656299c3ebdfd88ba5b3ba 100644 (file)
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
        }
 
        /* allocate space for the counter values */
-       dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+       dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+                           GFP_KERNEL);
        if (!dd->cntrs)
                goto bail;
 
index 1401b6ea4a287aab8c0cc48f5086c61aba82b588..2b882347d0c2ef6b6ebaa2c4bcab7d75cef4320d 100644 (file)
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
 extern struct hfi1_ib_stats hfi1_stats;
 extern const struct pci_error_handlers hfi1_pci_err_handler;
 
+extern int num_driver_cntrs;
+
 /*
  * First-cut criterion for "device is active" is
  * two thousand dwords combined Tx, Rx traffic per
index 6f3bc4dab858999740333d28f78c84f5adfa1117..1a016248039f74d4f9ffb9651e9d766981bf6e80 100644 (file)
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
        default:
                break;
        }
+
+       /*
+        * System latency between send and schedule is large enough that
+        * forcing call_send to true for piothreshold packets is necessary.
+        */
+       if (wqe->length <= piothreshold)
+               *call_send = true;
        return 0;
 }
 
index 48e11e51035888d46883a8a58bc43846df8ba803..a365089a9305b47b4b357186b28d8dbbd4a7ec8e 100644 (file)
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;
index 61aab7c0c5135b2ea67a719943c2211de7a20932..45c421c871007ed488df7df2fc1b58f322a4f661 100644 (file)
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 
        err = uverbs_get_flags32(&access, attrs,
                                 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-                                IB_ACCESS_SUPPORTED);
+                                IB_ACCESS_LOCAL_WRITE |
+                                IB_ACCESS_REMOTE_WRITE |
+                                IB_ACCESS_REMOTE_READ);
        if (err)
                return err;
 
index 2cc3d69ab6f64dde00ee48c82ff93c5edca697f4..4dc6cc640ce0c86a7fc1f95b5b61c3324bca193c 100644 (file)
@@ -506,14 +506,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                        u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+       int npages = 0, current_seq, page_shift, ret, np;
+       bool implicit = false;
        struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
        u64 access_mask = ODP_READ_ALLOWED_BIT;
-       int npages = 0, page_shift, np;
        u64 start_idx, page_mask;
        struct ib_umem_odp *odp;
-       int current_seq;
        size_t size;
-       int ret;
 
        if (!odp_mr->page_list) {
                odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -521,7 +520,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                if (IS_ERR(odp))
                        return PTR_ERR(odp);
                mr = odp->private;
-
+               implicit = true;
        } else {
                odp = odp_mr;
        }
@@ -600,7 +599,7 @@ next_mr:
 
 out:
        if (ret == -EAGAIN) {
-               if (mr->parent || !odp->dying) {
+               if (implicit || !odp->dying) {
                        unsigned long timeout =
                                msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 
index 5936de71883fb7f637b0192bf3144910d6e193d5..6fc93834da44648176a5163a098fdd485af2bf50 100644 (file)
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
        bool dirty_flag;
        *result = true;
 
+       if (from_cblock(cmd->cache_blocks) == 0)
+               /* Nothing to do */
+               return 0;
+
        r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
                                   from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
        if (r) {
index 0bd8d498b3b90dc8fdc173adcda33640ca73c04c..dadd9696340c00d13a47d38822fe886aaf5713d8 100644 (file)
@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
+ * The pool runs in various modes.  Ordered in degraded order for comparisons.
  */
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
        mempool_t mapping_pool;
 };
 
-static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+       return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+       const char *descs[] = {
+               "write",
+               "out-of-data-space",
+               "read-only",
+               "read-only",
+               "fail"
+       };
+       const char *extra_desc = NULL;
+       enum pool_mode mode = get_pool_mode(pool);
+
+       if (mode == PM_OUT_OF_DATA_SPACE) {
+               if (!pool->pf.error_if_no_space)
+                       extra_desc = " (queue IO)";
+               else
+                       extra_desc = " (error IO)";
+       }
+
+       dm_table_event(pool->ti->table);
+       DMINFO("%s: switching pool to %s%s mode",
+              dm_device_name(pool->pool_md),
+              descs[(int)mode], extra_desc ? : "");
+}
+
 /*
  * Target context for a pool.
  */
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
-               notify_of_pool_mode_change_to_oods(pool);
+               notify_of_pool_mode_change(pool);
                error_retry_list_with_code(pool, BLK_STS_NOSPC);
        }
 }
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 
 /*----------------------------------------------------------------*/
 
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
-       return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
-       dm_table_event(pool->ti->table);
-       DMINFO("%s: switching pool to %s mode",
-              dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
-       if (!pool->pf.error_if_no_space)
-               notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
-       else
-               notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
 static bool passdown_enabled(struct pool_c *pt)
 {
        return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
        switch (new_mode) {
        case PM_FAIL:
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change(pool, "failure");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
        case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (!is_read_only_pool_mode(old_mode))
-                       notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                 * alarming rate.  Adjust your low water mark if you're
                 * frequently seeing this mode.
                 */
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change_to_oods(pool);
                pool->out_of_data_space = true;
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                break;
 
        case PM_WRITE:
-               if (old_mode != new_mode)
-                       notify_of_pool_mode_change(pool, "write");
                if (old_mode == PM_OUT_OF_DATA_SPACE)
                        cancel_delayed_work_sync(&pool->no_space_timeout);
                pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
         * doesn't cause an unexpected mode transition on resume.
         */
        pt->adjusted_pf.mode = new_mode;
+
+       if (old_mode != new_mode)
+               notify_of_pool_mode_change(pool);
 }
 
 static void abort_transaction(struct pool *pool)
@@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 20, 0},
+       .version = {1, 21, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 20, 0},
+       .version = {1, 21, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
index 981154e5946147367f98fbe1e40342ba4541ee6c..6af5babe6837605c7a25983ef61fb7d83e73af30 100644 (file)
@@ -20,7 +20,6 @@ struct dmz_bioctx {
        struct dm_zone          *zone;
        struct bio              *bio;
        refcount_t              ref;
-       blk_status_t            status;
 };
 
 /*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
 {
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
 
-       if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
-               bioctx->status = status;
-       bio_endio(bio);
+       if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+               bio->bi_status = status;
+
+       if (refcount_dec_and_test(&bioctx->ref)) {
+               struct dm_zone *zone = bioctx->zone;
+
+               if (zone) {
+                       if (bio->bi_status != BLK_STS_OK &&
+                           bio_op(bio) == REQ_OP_WRITE &&
+                           dmz_is_seq(zone))
+                               set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+                       dmz_deactivate_zone(zone);
+               }
+               bio_endio(bio);
+       }
 }
 
 /*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
  * target BIO when there are no more references to its context.
  */
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
 {
-       struct dmz_bioctx *bioctx = bio->bi_private;
-       blk_status_t status = bio->bi_status;
+       struct dmz_bioctx *bioctx = clone->bi_private;
+       blk_status_t status = clone->bi_status;
 
-       bio_put(bio);
+       bio_put(clone);
        dmz_bio_endio(bioctx->bio, status);
 }
 
 /*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
  * original target BIO.
  */
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                              struct bio *bio, sector_t chunk_block,
-                              unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+                         struct bio *bio, sector_t chunk_block,
+                         unsigned int nr_blocks)
 {
        struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-       sector_t sector;
        struct bio *clone;
 
-       /* BIO remap sector */
-       sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
-       /* If the read is not partial, there is no need to clone the BIO */
-       if (nr_blocks == dmz_bio_blocks(bio)) {
-               /* Setup and submit the BIO */
-               bio->bi_iter.bi_sector = sector;
-               refcount_inc(&bioctx->ref);
-               generic_make_request(bio);
-               return 0;
-       }
-
-       /* Partial BIO: we need to clone the BIO */
        clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
        if (!clone)
                return -ENOMEM;
 
-       /* Setup the clone */
-       clone->bi_iter.bi_sector = sector;
+       bio_set_dev(clone, dmz->dev->bdev);
+       clone->bi_iter.bi_sector =
+               dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
        clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
-       clone->bi_end_io = dmz_read_bio_end_io;
+       clone->bi_end_io = dmz_clone_endio;
        clone->bi_private = bioctx;
 
        bio_advance(bio, clone->bi_iter.bi_size);
 
-       /* Submit the clone */
        refcount_inc(&bioctx->ref);
        generic_make_request(clone);
 
+       if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+               zone->wp_block += nr_blocks;
+
        return 0;
 }
 
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
                if (nr_blocks) {
                        /* Valid blocks found: read them */
                        nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
-                       ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+                       ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
                        if (ret)
                                return ret;
                        chunk_block += nr_blocks;
@@ -228,25 +228,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
        return 0;
 }
 
-/*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
-                                struct bio *bio, sector_t chunk_block,
-                                unsigned int nr_blocks)
-{
-       struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-       /* Setup and submit the BIO */
-       bio_set_dev(bio, dmz->dev->bdev);
-       bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-       refcount_inc(&bioctx->ref);
-       generic_make_request(bio);
-
-       if (dmz_is_seq(zone))
-               zone->wp_block += nr_blocks;
-}
-
 /*
  * Write blocks directly in a data zone, at the write pointer.
  * If a buffer zone is assigned, invalidate the blocks written
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
                return -EROFS;
 
        /* Submit write */
-       dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+       ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+       if (ret)
+               return ret;
 
        /*
         * Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
                return -EROFS;
 
        /* Submit write */
-       dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+       ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+       if (ret)
+               return ret;
 
        /*
         * Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
        bioctx->zone = NULL;
        bioctx->bio = bio;
        refcount_set(&bioctx->ref, 1);
-       bioctx->status = BLK_STS_OK;
 
        /* Set the BIO pending in the flush list */
        if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -623,35 +607,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-/*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
-       struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
-       if (bioctx->status == BLK_STS_OK && *error)
-               bioctx->status = *error;
-
-       if (!refcount_dec_and_test(&bioctx->ref))
-               return DM_ENDIO_INCOMPLETE;
-
-       /* Done */
-       bio->bi_status = bioctx->status;
-
-       if (bioctx->zone) {
-               struct dm_zone *zone = bioctx->zone;
-
-               if (*error && bio_op(bio) == REQ_OP_WRITE) {
-                       if (dmz_is_seq(zone))
-                               set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
-               }
-               dmz_deactivate_zone(zone);
-       }
-
-       return DM_ENDIO_DONE;
-}
-
 /*
  * Get zoned device information.
  */
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
        .map             = dmz_map,
-       .end_io          = dmz_end_io,
        .io_hints        = dmz_io_hints,
        .prepare_ioctl   = dmz_prepare_ioctl,
        .postsuspend     = dmz_suspend,
index c510179a7f845eb4f25e5818e39e54eb9afadcfb..63a7c416b224e7e840a87a6b2259a18fb83f0898 100644 (file)
@@ -1593,6 +1593,8 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                return ret;
        }
 
+       blk_queue_split(md->queue, &bio);
+
        init_clone_info(&ci, md, map, bio);
 
        if (bio->bi_opf & REQ_PREFLUSH) {
index 8add62a18293eff36f081922f9f73e3b1da26d31..102eb35fcf3fe414502d3c2212819fdf2b08efc8 100644 (file)
@@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
 
          This is currently experimental.
 
+config MEDIA_CONTROLLER_REQUEST_API
+       bool "Enable Media controller Request API (EXPERIMENTAL)"
+       depends on MEDIA_CONTROLLER && STAGING_MEDIA
+       default n
+       ---help---
+         DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+         This option enables the Request API for the Media controller and V4L2
+         interfaces. It is currently needed by a few stateless codec drivers.
+
+         There is currently no intention to provide API or ABI stability for
+         this new API as of yet.
+
 #
 # Video4Linux support
 #      Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
index 975ff5669f729ffa7b264fa48ece8c131f24ba30..8ff8722cb6b16dbf9b742eee7911dadba3037159 100644 (file)
@@ -947,7 +947,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        }
        atomic_dec(&q->owned_by_drv_count);
 
-       if (vb->req_obj.req) {
+       if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
                /* This is not supported at the moment */
                WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
                media_request_object_unbind(&vb->req_obj);
@@ -1359,8 +1359,12 @@ static void vb2_req_release(struct media_request_object *obj)
 {
        struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
 
-       if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+       if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
                vb->state = VB2_BUF_STATE_DEQUEUED;
+               if (vb->request)
+                       media_request_put(vb->request);
+               vb->request = NULL;
+       }
 }
 
 static const struct media_request_object_ops vb2_core_req_ops = {
@@ -1528,6 +1532,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
                        return ret;
 
                vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+               /*
+                * Increment the refcount and store the request.
+                * The request refcount is decremented again when the
+                * buffer is dequeued. This is to prevent vb2_buffer_done()
+                * from freeing the request from interrupt context, which can
+                * happen if the application closed the request fd after
+                * queueing the request.
+                */
+               media_request_get(req);
+               vb->request = req;
+
                /* Fill buffer information for the userspace */
                if (pb) {
                        call_void_bufop(q, copy_timestamp, vb, pb);
@@ -1749,10 +1765,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
                        call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
                        vb->planes[i].dbuf_mapped = 0;
                }
-       if (vb->req_obj.req) {
-               media_request_object_unbind(&vb->req_obj);
-               media_request_object_put(&vb->req_obj);
-       }
        call_void_bufop(q, init_buffer, vb);
 }
 
@@ -1797,6 +1809,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
        /* go back to dequeued state */
        __vb2_dqbuf(vb);
 
+       if (WARN_ON(vb->req_obj.req)) {
+               media_request_object_unbind(&vb->req_obj);
+               media_request_object_put(&vb->req_obj);
+       }
+       if (vb->request)
+               media_request_put(vb->request);
+       vb->request = NULL;
+
        dprintk(2, "dqbuf of buffer %d, with state %d\n",
                        vb->index, vb->state);
 
@@ -1903,6 +1923,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
                        vb->prepared = false;
                }
                __vb2_dqbuf(vb);
+
+               if (vb->req_obj.req) {
+                       media_request_object_unbind(&vb->req_obj);
+                       media_request_object_put(&vb->req_obj);
+               }
+               if (vb->request)
+                       media_request_put(vb->request);
+               vb->request = NULL;
        }
 }
 
@@ -1940,10 +1968,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
                if (ret)
                        return ret;
                ret = vb2_start_streaming(q);
-               if (ret) {
-                       __vb2_queue_cancel(q);
+               if (ret)
                        return ret;
-               }
        }
 
        q->streaming = 1;
index a17033ab2c2290e5dca5c9ea80849f9de918e290..1d35aeabfd85ac79de7cb38e98d83f108acdb220 100644 (file)
@@ -333,10 +333,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
 }
 
 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
-                                   struct v4l2_buffer *b,
-                                   const char *opname,
+                                   struct v4l2_buffer *b, bool is_prepare,
                                    struct media_request **p_req)
 {
+       const char *opname = is_prepare ? "prepare_buf" : "qbuf";
        struct media_request *req;
        struct vb2_v4l2_buffer *vbuf;
        struct vb2_buffer *vb;
@@ -378,6 +378,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
                        return ret;
        }
 
+       if (is_prepare)
+               return 0;
+
        if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                if (q->uses_requests) {
                        dprintk(1, "%s: queue uses requests\n", opname);
@@ -631,8 +634,10 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
                *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
        if (q->io_modes & VB2_DMABUF)
                *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
        if (q->supports_requests)
                *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
 }
 
 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@@ -657,7 +662,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
        if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
                return -EINVAL;
 
-       ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
+       ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
 
        return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
 }
@@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
                return -EBUSY;
        }
 
-       ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+       ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
        if (ret)
                return ret;
        ret = vb2_core_qbuf(q, b->index, b, req);
index bed24372e61fcd8c8f588321ceb4243b457afc88..b8ec88612df7079e860bfc30c8098b8947d3711a 100644 (file)
@@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
 static long media_device_request_alloc(struct media_device *mdev,
                                       int *alloc_fd)
 {
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
        if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
                return -ENOTTY;
 
        return media_request_alloc(mdev, alloc_fd);
+#else
+       return -ENOTTY;
+#endif
 }
 
 static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
index 013cdebecbc49b5a899a91fb8455383f2f6df506..13fb69c58967d0dbf527c4e0f39653410550da5d 100644 (file)
@@ -997,11 +997,18 @@ static int vicodec_start_streaming(struct vb2_queue *q,
 
        q_data->sequence = 0;
 
-       if (!V4L2_TYPE_IS_OUTPUT(q->type))
+       if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+               if (!ctx->is_enc) {
+                       state->width = q_data->width;
+                       state->height = q_data->height;
+               }
                return 0;
+       }
 
-       state->width = q_data->width;
-       state->height = q_data->height;
+       if (ctx->is_enc) {
+               state->width = q_data->width;
+               state->height = q_data->height;
+       }
        state->ref_frame.width = state->ref_frame.height = 0;
        state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
                                         GFP_KERNEL);
index dcdc80e272c209132a57826edb9b15e68fbaf4fe..9acc709b0740fe5d5de393edccdd78c91ee5db5b 100644 (file)
@@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_sdr_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 903cebeb5ce50c2b9d2cc8d76ee8a13895e8d556..d666271bdaeddde6c3c712ca761a4e439d33c77c 100644 (file)
@@ -204,8 +204,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vbi_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 9357c07e30d645685657ccc0fc36d34cf2c45b8a..cd56476902a2b16cbcbc77ec5b0f8fa6ebeac0fb 100644 (file)
@@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vbi_out);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 9c8e8be81ce34ccd2071d9bd52088ad940f7a98f..673772cd17d61b22a4073af17268dce226ef475a 100644 (file)
@@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vid_cap);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index aaf13f03d5d4347d42312324327e0f4fd3368d63..628eae154ee709e5cf96128c15f19d73e51442e1 100644 (file)
@@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
 
                list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
                        list_del(&buf->list);
-                       v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
-                                                  &dev->ctrl_hdl_vid_out);
                        vb2_buffer_done(&buf->vb.vb2_buf,
                                        VB2_BUF_STATE_QUEUED);
                }
index 0b18f0bd74199885dcb19f9c0fd7ca801e2b39e8..8b0a26335d70a287d6a31fb262d45c45e9ad8bc8 100644 (file)
@@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
        format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
                                            LIF_PAD_SOURCE);
 
-       switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) {
+       switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
        case VI6_IP_VERSION_MODEL_VSPD_GEN2:
        case VI6_IP_VERSION_MODEL_VSPD_V2H:
                hbth = 1536;
index 5f2b033a7a42f1cb35dd13e40a498232bd3a225d..10b8d94edbef1536c114a7c8b8a2d9110b8a8f31 100644 (file)
@@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
        u64 offset;
        s64 val;
 
-       switch (ctrl->type) {
+       switch ((u32)ctrl->type) {
        case V4L2_CTRL_TYPE_INTEGER:
                return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
        case V4L2_CTRL_TYPE_INTEGER64:
@@ -2232,7 +2232,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
        is_array = nr_of_dims > 0;
 
        /* Prefill elem_size for all types handled by std_type_ops */
-       switch (type) {
+       switch ((u32)type) {
        case V4L2_CTRL_TYPE_INTEGER64:
                elem_size = sizeof(s64);
                break;
index c35b5b08bb334626279d3d4f12a8a4c6af691492..111934838da2c14ef439a4a3b2907d46093380c9 100644 (file)
@@ -472,7 +472,7 @@ out:
 static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                               struct mmc_blk_ioc_data *idata)
 {
-       struct mmc_command cmd = {};
+       struct mmc_command cmd = {}, sbc = {};
        struct mmc_data data = {};
        struct mmc_request mrq = {};
        struct scatterlist sg;
@@ -550,10 +550,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        }
 
        if (idata->rpmb) {
-               err = mmc_set_blockcount(card, data.blocks,
-                       idata->ic.write_flag & (1 << 31));
-               if (err)
-                       return err;
+               sbc.opcode = MMC_SET_BLOCK_COUNT;
+               /*
+                * We don't do any blockcount validation because the max size
+                * may be increased by a future standard. We just copy the
+                * 'Reliable Write' bit here.
+                */
+               sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+               sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+               mrq.sbc = &sbc;
        }
 
        if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
index bc1bd2c256132c2f3def2844a7d5ccccb05ef523..55997cf84b39f5dd9c3ced5ea9db6702cb02c65a 100644 (file)
@@ -30,6 +30,7 @@
 #include "pwrseq.h"
 
 #define DEFAULT_CMD6_TIMEOUT_MS        500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
 
 static const unsigned int tran_exp[] = {
        10000,          100000,         1000000,        10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        card->cid.year += 16;
 
                /* check whether the eMMC card supports BKOPS */
-               if (!mmc_card_broken_hpi(card) &&
-                   ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+               if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
                        card->ext_csd.bkops = 1;
                        card->ext_csd.man_bkops_en =
                                        (ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
                if (err) {
                        pr_warn("%s: Enabling HPI failed\n",
                                mmc_hostname(card->host));
+                       card->ext_csd.hpi_en = 0;
                        err = 0;
-               } else
+               } else {
                        card->ext_csd.hpi_en = 1;
+               }
        }
 
        /*
-        * If cache size is higher than 0, this indicates
-        * the existence of cache and it can be turned on.
+        * If cache size is higher than 0, this indicates the existence of cache
+        * and it can be turned on. Note that some eMMCs from Micron has been
+        * reported to need ~800 ms timeout, while enabling the cache after
+        * sudden power failure tests. Let's extend the timeout to a minimum of
+        * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
         */
-       if (!mmc_card_broken_hpi(card) &&
-           card->ext_csd.cache_size > 0) {
+       if (card->ext_csd.cache_size > 0) {
+               unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+               timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                               EXT_CSD_CACHE_CTRL, 1,
-                               card->ext_csd.generic_cmd6_time);
+                               EXT_CSD_CACHE_CTRL, 1, timeout_ms);
                if (err && err != -EBADMSG)
                        goto free_card;
 
index adf32682f27a3c8f96c2c244af96cae2bafbd6a8..c60a7625b1fab7ff0657ea48be126fb384c261b7 100644 (file)
@@ -104,6 +104,7 @@ struct mmc_omap_slot {
        unsigned int            vdd;
        u16                     saved_con;
        u16                     bus_mode;
+       u16                     power_mode;
        unsigned int            fclk_freq;
 
        struct tasklet_struct   cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        struct mmc_omap_slot *slot = mmc_priv(mmc);
        struct mmc_omap_host *host = slot->host;
        int i, dsor;
-       int clk_enabled;
+       int clk_enabled, init_stream;
 
        mmc_omap_select_slot(slot, 0);
 
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                slot->vdd = ios->vdd;
 
        clk_enabled = 0;
+       init_stream = 0;
        switch (ios->power_mode) {
        case MMC_POWER_OFF:
                mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        case MMC_POWER_UP:
                /* Cannot touch dsor yet, just power up MMC */
                mmc_omap_set_power(slot, 1, ios->vdd);
+               slot->power_mode = ios->power_mode;
                goto exit;
        case MMC_POWER_ON:
                mmc_omap_fclk_enable(host, 1);
                clk_enabled = 1;
                dsor |= 1 << 11;
+               if (slot->power_mode != MMC_POWER_ON)
+                       init_stream = 1;
                break;
        }
+       slot->power_mode = ios->power_mode;
 
        if (slot->bus_mode != ios->bus_mode) {
                if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        for (i = 0; i < 2; i++)
                OMAP_MMC_WRITE(host, CON, dsor);
        slot->saved_con = dsor;
-       if (ios->power_mode == MMC_POWER_ON) {
+       if (init_stream) {
                /* worst case at 400kHz, 80 cycles makes 200 microsecs */
                int usecs = 250;
 
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
        slot->host = host;
        slot->mmc = mmc;
        slot->id = id;
+       slot->power_mode = MMC_POWER_UNDEFINED;
        slot->pdata = &host->pdata->slots[id];
 
        host->slots[id] = slot;
index 467d889a16386c26e273a0157ce25787ea6116ce..3f4ea8f624be5f1e62a109b8e18ade78be7d3954 100644 (file)
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
        mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
        mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
        mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
-       mmc->max_seg_size = mmc->max_req_size;
 
        mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
                     MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
+       /*
+        * Limit the maximum segment size to the lower of the request size
+        * and the DMA engine device segment size limits.  In reality, with
+        * 32-bit transfers, the DMA engine can do longer segments than this
+        * but there is no way to represent that in the DMA model - if we
+        * increase this figure here, we get warnings from the DMA API debug.
+        */
+       mmc->max_seg_size = min3(mmc->max_req_size,
+                       dma_get_max_seg_size(host->rx_chan->device->dev),
+                       dma_get_max_seg_size(host->tx_chan->device->dev));
+
        /* Request IRQ for MMC operations */
        ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
                        mmc_hostname(mmc), host);
index 88347ce78f23feee0b1b2f1f191c0891ce563358..d264391616f9310fc4433e6aca72ac65439396c9 100644 (file)
@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
        struct device *dev = omap_host->dev;
        struct mmc_ios *ios = &mmc->ios;
        u32 start_window = 0, max_window = 0;
+       bool dcrc_was_enabled = false;
        u8 cur_match, prev_match = 0;
        u32 length = 0, max_len = 0;
-       u32 ier = host->ier;
        u32 phase_delay = 0;
        int ret = 0;
        u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * during the tuning procedure. So disable it during the
         * tuning procedure.
         */
-       ier &= ~SDHCI_INT_DATA_CRC;
-       sdhci_writel(host, ier, SDHCI_INT_ENABLE);
-       sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+       if (host->ier & SDHCI_INT_DATA_CRC) {
+               host->ier &= ~SDHCI_INT_DATA_CRC;
+               dcrc_was_enabled = true;
+       }
 
        while (phase_delay <= MAX_PHASE_DELAY) {
                sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
 
 ret:
        sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+       /* Reenable forbidden interrupt */
+       if (dcrc_was_enabled)
+               host->ier |= SDHCI_INT_DATA_CRC;
        sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
        sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        return ret;
index 7b95d088fdefd5ff4067c12960b00f3185429440..e6ace31e2a418cc80e0df72eb66f89d1119c26ca 100644 (file)
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-up-offset-3v3-timeout",
-                       &autocal->pull_up_3v3);
+                       &autocal->pull_up_3v3_timeout);
        if (err)
                autocal->pull_up_3v3_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-down-offset-3v3-timeout",
-                       &autocal->pull_down_3v3);
+                       &autocal->pull_down_3v3_timeout);
        if (err)
                autocal->pull_down_3v3_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-up-offset-1v8-timeout",
-                       &autocal->pull_up_1v8);
+                       &autocal->pull_up_1v8_timeout);
        if (err)
                autocal->pull_up_1v8_timeout = 0;
 
        err = device_property_read_u32(host->mmc->parent,
                        "nvidia,pad-autocal-pull-down-offset-1v8-timeout",
-                       &autocal->pull_down_1v8);
+                       &autocal->pull_down_1v8_timeout);
        if (err)
                autocal->pull_down_1v8_timeout = 0;
 
index 99bdae53fa2e089f373c2ec6ecc73587a797b2ae..df05352b6a4aa5bef6c54092078f7bd75acc455a 100644 (file)
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
 {
        u16 ctrl2;
 
-       ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+       ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        if (ctrl2 & SDHCI_CTRL_V4_MODE)
                return;
 
        ctrl2 |= SDHCI_CTRL_V4_MODE;
-       sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+       sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
 }
 
 /*
@@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
        timeout = ktime_add_ms(ktime_get(), 100);
 
        /* hw clears the bit when it's done */
-       while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
-               if (ktime_after(ktime_get(), timeout)) {
+       while (1) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
+                       break;
+               if (timedout) {
                        pr_err("%s: Reset 0x%x never completed.\n",
                                mmc_hostname(host->mmc), (int)mask);
                        sdhci_dumpregs(host);
@@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
 
        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
-       while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
-               & SDHCI_CLOCK_INT_STABLE)) {
-               if (ktime_after(ktime_get(), timeout)) {
+       while (1) {
+               bool timedout = ktime_after(ktime_get(), timeout);
+
+               clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+               if (clk & SDHCI_CLOCK_INT_STABLE)
+                       break;
+               if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        sdhci_dumpregs(host);
index e05d4eddc9351d9c582cc51cae07a49ae2055e4f..24fb6a68503966a4ae049ef6b6fafa7d2bce7edf 100644 (file)
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
        u16 *p = _p;
        int i;
 
-       regs->version = 0;
+       regs->version = chip->info->prod_num;
 
        memset(p, 0xff, 32 * sizeof(u16));
 
index 3b889efddf789643803b371420c321e90b0e5481..50dd6bf176d034721590bf15c2abfab88dd5285a 100644 (file)
@@ -29,9 +29,6 @@
 #define RES_RING_CSR   1
 #define RES_RING_CMD   2
 
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
        struct xgene_enet_raw_desc16 *raw_desc;
index 0de487a8f0eb22d7b033c4f9751b68cf2454286e..5cd3135dfe302331b30e6bb1105add2a95189c6c 100644 (file)
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
        BNX2X_SP_RTNL_TX_STOP,
        BNX2X_SP_RTNL_GET_DRV_VERSION,
        BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+       BNX2X_SP_RTNL_UPDATE_SVID,
 };
 
 enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
 void bnx2x_init_ptp(struct bnx2x *bp);
 int bnx2x_configure_ptp_filters(struct bnx2x *bp);
 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+void bnx2x_register_phc(struct bnx2x *bp);
 
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
index 686899d7e555e84a4c78459e1780765b9794b913..ecb1bd7eb5080d1d47725e0c98f3153437fb5a42 100644 (file)
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        bnx2x_set_rx_mode_inner(bp);
 
        if (bp->flags & PTP_SUPPORTED) {
+               bnx2x_register_phc(bp);
                bnx2x_init_ptp(bp);
                bnx2x_configure_ptp_filters(bp);
        }
index 95309b27c7d19cca8261ede60d3a32a04ebbab39..b164f705709d083576e92fb4e2fd007cd3dbe9bb 100644 (file)
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
+       /* Prepare parameters for function state transitions */
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
                int func = BP_ABS_FUNC(bp);
                u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                                bnx2x_handle_eee_event(bp);
 
                        if (val & DRV_STATUS_OEM_UPDATE_SVID)
-                               bnx2x_handle_update_svid_cmd(bp);
+                               bnx2x_schedule_sp_rtnl(bp,
+                                       BNX2X_SP_RTNL_UPDATE_SVID, 0);
 
                        if (bp->link_vars.periodic_flags &
                            PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
                REG_WR(bp, reg_addr, val);
        }
 
+       if (CHIP_IS_E3B0(bp))
+               bp->flags |= PTP_SUPPORTED;
+
        return 0;
 }
 
@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
        /* Fill a user request section if needed */
        if (!test_bit(RAMROD_CONT, ramrod_flags)) {
                ramrod_param.user_req.u.vlan.vlan = vlan;
+               __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
                /* Set the command: ADD or DEL */
                if (set)
                        ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
        return rc;
 }
 
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+       unsigned long ramrod_flags = 0, vlan_flags = 0;
+       struct bnx2x_vlan_entry *vlan;
+       int rc;
+
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       __set_bit(BNX2X_VLAN, &vlan_flags);
+       rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+       if (rc)
+               return rc;
+
+       /* Mark that hw forgot all entries */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+       bp->vlan_cnt = 0;
+
+       return 0;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
                       struct bnx2x_vlan_mac_obj *mac_obj,
                       int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,11 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
                BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
                          rc);
 
+       /* Remove all currently configured VLANs */
+       rc = bnx2x_del_all_vlans(bp);
+       if (rc < 0)
+               BNX2X_ERR("Failed to delete all VLANs\n");
+
        /* Disable LLH */
        if (!CHIP_IS_E1(bp))
                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9452,13 @@ unload_error:
         * function stop ramrod is sent, since as part of this ramrod FW access
         * PTP registers.
         */
-       if (bp->flags & PTP_SUPPORTED)
+       if (bp->flags & PTP_SUPPORTED) {
                bnx2x_stop_ptp(bp);
+               if (bp->ptp_clock) {
+                       ptp_clock_unregister(bp->ptp_clock);
+                       bp->ptp_clock = NULL;
+               }
+       }
 
        /* Disable HW interrupts, NAPI */
        bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10399,9 @@ sp_rtnl_not_reset:
                               &bp->sp_rtnl_state))
                bnx2x_update_mng_version(bp);
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+               bnx2x_handle_update_svid_cmd(bp);
+
        if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
                               &bp->sp_rtnl_state)) {
                if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11793,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
         * If maximum allowed number of connections is zero -
         * disable the feature.
         */
-       if (!bp->cnic_eth_dev.max_fcoe_conn)
+       if (!bp->cnic_eth_dev.max_fcoe_conn) {
                bp->flags |= NO_FCOE_FLAG;
+               eth_zero_addr(bp->fip_mac);
+       }
 }
 
 static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12539,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bp->dump_preset_idx = 1;
 
-       if (CHIP_IS_E3B0(bp))
-               bp->flags |= PTP_SUPPORTED;
-
        return rc;
 }
 
@@ -13024,13 +13066,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
 
 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
 {
-       struct bnx2x_vlan_entry *vlan;
-
-       /* The hw forgot all entries after reload */
-       list_for_each_entry(vlan, &bp->vlan_reg, link)
-               vlan->hw = false;
-       bp->vlan_cnt = 0;
-
        /* Don't set rx mode here. Our caller will do it. */
        bnx2x_vlan_configure(bp, false);
 
@@ -13895,7 +13930,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
        return -ENOTSUPP;
 }
 
-static void bnx2x_register_phc(struct bnx2x *bp)
+void bnx2x_register_phc(struct bnx2x *bp)
 {
        /* Fill the ptp_clock_info struct and register PTP clock*/
        bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14132,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
               dev->base_addr, bp->pdev->irq, dev->dev_addr);
        pcie_print_link_status(bp->pdev);
 
-       bnx2x_register_phc(bp);
-
        if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
                bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
 
@@ -14131,11 +14164,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                           struct bnx2x *bp,
                           bool remove_netdev)
 {
-       if (bp->ptp_clock) {
-               ptp_clock_unregister(bp->ptp_clock);
-               bp->ptp_clock = NULL;
-       }
-
        /* Delete storage MAC address */
        if (!NO_FCOE(bp)) {
                rtnl_lock();
index 0bf2fd470819e64d2d7788b57caee6569b9b3828..7a6e82db423123585574c88765fb00f14ab3b603 100644 (file)
@@ -265,6 +265,7 @@ enum {
        BNX2X_ETH_MAC,
        BNX2X_ISCSI_ETH_MAC,
        BNX2X_NETQ_ETH_MAC,
+       BNX2X_VLAN,
        BNX2X_DONT_CONSUME_CAM_CREDIT,
        BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
@@ -272,7 +273,8 @@ enum {
 #define BNX2X_VLAN_MAC_CMP_MASK        (1 << BNX2X_UC_LIST_MAC | \
                                 1 << BNX2X_ETH_MAC | \
                                 1 << BNX2X_ISCSI_ETH_MAC | \
-                                1 << BNX2X_NETQ_ETH_MAC)
+                                1 << BNX2X_NETQ_ETH_MAC | \
+                                1 << BNX2X_VLAN)
 #define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
        ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
index 6cc69a58478a5ffaad8b7a0511d68e36a93657f3..6b51f4de601743a6d24fb54ea3e3159b39af9acc 100644 (file)
@@ -2572,6 +2572,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 static int bnxt_run_loopback(struct bnxt *bp)
 {
        struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
        struct bnxt_cp_ring_info *cpr;
        int pkt_size, i = 0;
        struct sk_buff *skb;
@@ -2579,7 +2580,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
        u8 *data;
        int rc;
 
-       cpr = &txr->bnapi->cp_ring;
+       cpr = &rxr->bnapi->cp_ring;
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
index 1d86b4d5645ad884b34103a45eb42c7af95f0745..4c816e5a841fa3c976114c93ad86b08f4d8f2195 100644 (file)
@@ -61,7 +61,8 @@
 #define MACB_TX_ERR_FLAGS      (MACB_BIT(ISR_TUND)                     \
                                        | MACB_BIT(ISR_RLE)             \
                                        | MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)    \
+                                       | MACB_BIT(TXUBR))
 
 /* Max length of transmit frame must be a multiple of 8 bytes */
 #define MACB_TX_LEN_ALIGN      8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
        if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
                desc_64 = macb_64b_desc(bp, desc);
                desc_64->addrh = upper_32_bits(addr);
+               /* The low bits of RX address contain the RX_USED bit, clearing
+                * of which allows packet RX. Make sure the high bits are also
+                * visible to HW at that point.
+                */
+               dma_wmb();
        }
 #endif
        desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)
 
                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
-                       macb_set_addr(bp, desc, paddr);
                        desc->ctrl = 0;
+                       /* Setting addr clears RX_USED and allows reception,
+                        * make sure ctrl is cleared first to avoid a race.
+                        */
+                       dma_wmb();
+                       macb_set_addr(bp, desc, paddr);
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
-                       desc->addr &= ~MACB_BIT(RX_USED);
                        desc->ctrl = 0;
+                       dma_wmb();
+                       desc->addr &= ~MACB_BIT(RX_USED);
                }
        }
 
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
 
                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
                addr = macb_get_addr(bp, desc);
-               ctrl = desc->ctrl;
 
                if (!rxused)
                        break;
 
+               /* Ensure ctrl is at least as up-to-date as rxused */
+               dma_rmb();
+
+               ctrl = desc->ctrl;
+
                queue->rx_tail++;
                count++;
 
@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               ctrl = desc->ctrl;
-
                if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;
 
+               /* Ensure ctrl is at least as up-to-date as addr */
+               dma_rmb();
+
+               ctrl = desc->ctrl;
+
                if (ctrl & MACB_BIT(RX_SOF)) {
                        if (first_frag != -1)
                                discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
        netif_tx_start_all_queues(dev);
 }
 
+static void macb_tx_restart(struct macb_queue *queue)
+{
+       unsigned int head = queue->tx_head;
+       unsigned int tail = queue->tx_tail;
+       struct macb *bp = queue->bp;
+
+       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+               queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+       if (head == tail)
+               return;
+
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
        struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                if (status & MACB_BIT(TCOMP))
                        macb_tx_interrupt(queue);
 
+               if (status & MACB_BIT(TXUBR))
+                       macb_tx_restart(queue);
+
                /* Link change detection isn't possible with RMII, so we'll
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */
index cd5296b842290302a5904b4c590e51fdea741bbd..a6dc47edc4cf6431066850770928640670e20bb1 100644 (file)
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
        desc_ptp = macb_ptp_desc(queue->bp, desc);
        tx_timestamp = &queue->tx_timestamps[head];
        tx_timestamp->skb = skb;
+       /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
+       dma_rmb();
        tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
        tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
        /* move head */
index f152da1ce0464c5c065010813213e5f60eb111af..c62a0c830705cc71681890bbcf17223de16bdbac 100644 (file)
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
 #define T6_TX_FORCE_V(x)       ((x) << T6_TX_FORCE_S)
 #define T6_TX_FORCE_F          T6_TX_FORCE_V(1U)
 
+#define TX_URG_S    16
+#define TX_URG_V(x) ((x) << TX_URG_S)
+
 #define TX_SHOVE_S    14
 #define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
 
index b52029e26d15323b98811c5180a3d78ac288bf52..ad1779fc410e64b668bf9bdf5dc037eb46a737fa 100644 (file)
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
 
        hns_ae_ring_enable_all(handle, 0);
 
+       /* clean rx fbd. */
+       hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
        (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
 }
 
index aaf72c055711ce2051d982c3f91b4fb674117a29..1790cdafd9b823ec32e10e7a301c3e259a4d467a 100644 (file)
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
        /*enable GE rX/tX */
-       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
 
-       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+               /* enable rx pcs */
+               dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+       }
 }
 
 static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
        struct mac_driver *drv = (struct mac_driver *)mac_drv;
 
        /*disable GE rX/tX */
-       if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
 
-       if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+       if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+               /* disable rx pcs */
+               dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
                dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+       }
 }
 
 /* hns_gmac_get_en - get port enable
index 3613e400e816d9fd6478f5328e11a13a7e08e802..a97228c93831d69fe2211317486f14a25197e740 100644 (file)
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
        return rc;
 }
 
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+       if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+               return;
+
+       phy_device_remove(mac_cb->phy_dev);
+       phy_device_free(mac_cb->phy_dev);
+
+       mac_cb->phy_dev = NULL;
+}
+
 #define MAC_MEDIA_TYPE_MAX_LEN         16
 
 static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
        int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
 
        for (i = 0; i < max_port_num; i++) {
+               if (!dsaf_dev->mac_cb[i])
+                       continue;
+
                dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+               hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
                dsaf_dev->mac_cb[i] = NULL;
        }
 }
index e557a4ef5996c6772804ca746830af633a4adca9..3b9e74be5fbd27309fb00e1d36195f619c6a8ac6 100644 (file)
@@ -934,6 +934,62 @@ static void hns_dsaf_tcam_mc_cfg(
        spin_unlock_bh(&dsaf_dev->tcam_lock);
 }
 
+/**
+ * hns_dsaf_tcam_uc_cfg_vague - INT
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address,
+ * @ptbl_tcam_data,
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+                                      u32 address,
+                                      struct dsaf_tbl_tcam_data *tcam_data,
+                                      struct dsaf_tbl_tcam_data *tcam_mask,
+                                      struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+       spin_lock_bh(&dsaf_dev->tcam_lock);
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+       hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+       hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+       /*Restore Match Data*/
+       tcam_mask->tbl_tcam_data_high = 0xffffffff;
+       tcam_mask->tbl_tcam_data_low = 0xffffffff;
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+       spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - INT
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address,
+ * @ptbl_tcam_data,
+ * @ptbl_tcam_mask
+ * @ptbl_tcam_mcast
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+                                      u32 address,
+                                      struct dsaf_tbl_tcam_data *tcam_data,
+                                      struct dsaf_tbl_tcam_data *tcam_mask,
+                                      struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+       spin_lock_bh(&dsaf_dev->tcam_lock);
+       hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+       hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+       hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+       hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+       /*Restore Match Data*/
+       tcam_mask->tbl_tcam_data_high = 0xffffffff;
+       tcam_mask->tbl_tcam_data_low = 0xffffffff;
+       hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+       spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
 /**
  * hns_dsaf_tcam_mc_invld - INT
  * @dsaf_id: dsa fabric id
@@ -1492,6 +1548,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
        return DSAF_INVALID_ENTRY_IDX;
 }
 
+/**
+ * hns_dsaf_find_empty_mac_entry_reverse
+ * search dsa fabric soft empty-entry from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+       struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       int i;
+
+       soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+       for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+               /* search all entry from end to start.*/
+               if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+                       return i;
+               soft_mac_entry--;
+       }
+       return DSAF_INVALID_ENTRY_IDX;
+}
+
 /**
  * hns_dsaf_set_mac_key - set mac key
  * @dsaf_dev: dsa fabric device struct pointer
@@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
                DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
 
        hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
-               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+               DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
        hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
-               DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+               DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
 
        /* pfc pause frame statistics stored in dsaf inode*/
        if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
                                DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
                p[223 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
-               p[224 + i] = dsaf_read_dev(ddev,
+               p[226 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
        }
 
-       p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+       p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
 
        for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[228 + i] = dsaf_read_dev(ddev,
+               p[230 + i] = dsaf_read_dev(ddev,
                                DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
        }
 
-       p[231] = dsaf_read_dev(ddev,
-               DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+       p[233] = dsaf_read_dev(ddev,
+               DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
 
        /* dsaf inode registers */
        for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[232 + i] = dsaf_read_dev(ddev,
+               p[234 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_CFG_REG_0_REG + j * 0x80);
-               p[235 + i] = dsaf_read_dev(ddev,
+               p[237 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
-               p[238 + i] = dsaf_read_dev(ddev,
+               p[240 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
-               p[241 + i] = dsaf_read_dev(ddev,
+               p[243 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
-               p[244 + i] = dsaf_read_dev(ddev,
+               p[246 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
-               p[245 + i] = dsaf_read_dev(ddev,
+               p[249 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
-               p[248 + i] = dsaf_read_dev(ddev,
+               p[252 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
-               p[251 + i] = dsaf_read_dev(ddev,
+               p[255 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
-               p[254 + i] = dsaf_read_dev(ddev,
+               p[258 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
-               p[257 + i] = dsaf_read_dev(ddev,
+               p[261 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
-               p[260 + i] = dsaf_read_dev(ddev,
+               p[264 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_INER_ST_0_REG + j * 0x80);
-               p[263 + i] = dsaf_read_dev(ddev,
+               p[267 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
-               p[266 + i] = dsaf_read_dev(ddev,
+               p[270 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
-               p[269 + i] = dsaf_read_dev(ddev,
+               p[273 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
-               p[272 + i] = dsaf_read_dev(ddev,
+               p[276 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
-               p[275 + i] = dsaf_read_dev(ddev,
+               p[279 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
-               p[278 + i] = dsaf_read_dev(ddev,
+               p[282 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
-               p[281 + i] = dsaf_read_dev(ddev,
+               p[285 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
-               p[284 + i] = dsaf_read_dev(ddev,
+               p[288 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
-               p[287 + i] = dsaf_read_dev(ddev,
+               p[291 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
-               p[290 + i] = dsaf_read_dev(ddev,
+               p[294 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
-               p[293 + i] = dsaf_read_dev(ddev,
+               p[297 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
-               p[296 + i] = dsaf_read_dev(ddev,
+               p[300 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
-               p[299 + i] = dsaf_read_dev(ddev,
+               p[303 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
-               p[302 + i] = dsaf_read_dev(ddev,
+               p[306 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
-               p[305 + i] = dsaf_read_dev(ddev,
+               p[309 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
-               p[308 + i] = dsaf_read_dev(ddev,
+               p[312 + i] = dsaf_read_dev(ddev,
                                DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
        }
 
        /* dsaf onode registers */
        for (i = 0; i < DSAF_XOD_NUM; i++) {
-               p[311 + i] = dsaf_read_dev(ddev,
+               p[315 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
-               p[319 + i] = dsaf_read_dev(ddev,
+               p[323 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
-               p[327 + i] = dsaf_read_dev(ddev,
+               p[331 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
-               p[335 + i] = dsaf_read_dev(ddev,
+               p[339 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
-               p[343 + i] = dsaf_read_dev(ddev,
+               p[347 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
-               p[351 + i] = dsaf_read_dev(ddev,
+               p[355 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
        }
 
-       p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
-       p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
-       p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+       p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+       p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+       p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
 
        for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
                j = i * DSAF_COMM_CHN + port;
-               p[362 + i] = dsaf_read_dev(ddev,
+               p[366 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_GNT_L_0_REG + j * 0x90);
-               p[365 + i] = dsaf_read_dev(ddev,
+               p[369 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_GNT_H_0_REG + j * 0x90);
-               p[368 + i] = dsaf_read_dev(ddev,
+               p[372 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
-               p[371 + i] = dsaf_read_dev(ddev,
+               p[375 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
-               p[374 + i] = dsaf_read_dev(ddev,
+               p[378 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
-               p[377 + i] = dsaf_read_dev(ddev,
+               p[381 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
-               p[380 + i] = dsaf_read_dev(ddev,
+               p[384 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
-               p[383 + i] = dsaf_read_dev(ddev,
+               p[387 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
-               p[386 + i] = dsaf_read_dev(ddev,
+               p[390 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
-               p[389 + i] = dsaf_read_dev(ddev,
+               p[393 + i] = dsaf_read_dev(ddev,
                                DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
        }
 
-       p[392] = dsaf_read_dev(ddev,
+       p[396] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[393] = dsaf_read_dev(ddev,
+       p[397] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[394] = dsaf_read_dev(ddev,
+       p[398] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
-       p[395] = dsaf_read_dev(ddev,
+       p[399] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
-       p[396] = dsaf_read_dev(ddev,
+       p[400] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
-       p[397] = dsaf_read_dev(ddev,
+       p[401] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
-       p[398] = dsaf_read_dev(ddev,
+       p[402] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
-       p[399] = dsaf_read_dev(ddev,
+       p[403] = dsaf_read_dev(ddev,
                DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
-       p[400] = dsaf_read_dev(ddev,
+       p[404] = dsaf_read_dev(ddev,
                DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[401] = dsaf_read_dev(ddev,
+       p[405] = dsaf_read_dev(ddev,
                DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[402] = dsaf_read_dev(ddev,
+       p[406] = dsaf_read_dev(ddev,
                DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
-       p[403] = dsaf_read_dev(ddev,
+       p[407] = dsaf_read_dev(ddev,
                DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
-       p[404] = dsaf_read_dev(ddev,
+       p[408] = dsaf_read_dev(ddev,
                DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
 
        /* dsaf voq registers */
        for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
                j = (i * DSAF_COMM_CHN + port) * 0x90;
-               p[405 + i] = dsaf_read_dev(ddev,
+               p[409 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
-               p[408 + i] = dsaf_read_dev(ddev,
+               p[412 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
-               p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
-               p[414 + i] = dsaf_read_dev(ddev,
+               p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+               p[418 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
-               p[417 + i] = dsaf_read_dev(ddev,
+               p[421 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
-               p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
-               p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
-               p[426 + i] = dsaf_read_dev(ddev,
+               p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+               p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+               p[430 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
-               p[429 + i] = dsaf_read_dev(ddev,
+               p[433 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
-               p[432 + i] = dsaf_read_dev(ddev,
+               p[436 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
-               p[435 + i] = dsaf_read_dev(ddev,
+               p[439 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
-               p[438 + i] = dsaf_read_dev(ddev,
+               p[442 + i] = dsaf_read_dev(ddev,
                        DSAF_VOQ_BP_ALL_THRD_0_REG + j);
        }
 
        /* dsaf tbl registers */
-       p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
-       p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
-       p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
-       p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
-       p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
-       p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
-       p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
-       p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
-       p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
-       p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
-       p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
-       p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
-       p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
-       p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
-       p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
-       p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
-       p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
-       p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
-       p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
-       p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
-       p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
-       p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
-       p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+       p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+       p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+       p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+       p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+       p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+       p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+       p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+       p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+       p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+       p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+       p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+       p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+       p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+       p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+       p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+       p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+       p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+       p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+       p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+       p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+       p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+       p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+       p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
 
        for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
                j = i * 0x8;
-               p[464 + 2 * i] = dsaf_read_dev(ddev,
+               p[468 + 2 * i] = dsaf_read_dev(ddev,
                        DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
-               p[465 + 2 * i] = dsaf_read_dev(ddev,
+               p[469 + 2 * i] = dsaf_read_dev(ddev,
                        DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
        }
 
-       p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
-       p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
-       p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
-       p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
-       p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
-       p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
-       p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
-       p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
-       p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
-       p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
-       p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
-       p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+       p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+       p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+       p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+       p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+       p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+       p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+       p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+       p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+       p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+       p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+       p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+       p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
 
        /* dsaf other registers */
-       p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
-       p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
-       p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
-       p[495] = dsaf_read_dev(ddev,
+       p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+       p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+       p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+       p[499] = dsaf_read_dev(ddev,
                DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
-       p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
-       p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+       p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+       p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
 
        if (!is_ver1)
-               p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+               p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
 
        /* mark end of dsaf regs */
-       for (i = 499; i < 504; i++)
+       for (i = 503; i < 504; i++)
                p[i] = 0xdddddddd;
 }
 
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
        return DSAF_DUMP_REGS_NUM;
 }
 
-/* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
-       (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
-void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
-                              u32 port, bool enable)
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
 {
+       struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+       struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+       struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+       struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
        struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
-       struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
-       u16 entry_index;
-       struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
-       struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
+       struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+       struct dsaf_drv_mac_single_dest_entry mask_entry;
+       struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       struct hns_mac_cb *mac_cb;
+       u8 addr[ETH_ALEN] = {0};
+       u8 port_num;
+       u16 mskid;
+
+       /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+       if (entry_index != DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* put promisc tcam entry in the end. */
+       /* 1. set promisc unicast vague tcam entry. */
+       entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               dev_err(dsaf_dev->dev,
+                       "enable uc promisc failed (port:%#x)\n",
+                       port);
+               return;
+       }
+
+       mac_cb = dsaf_dev->mac_cb[port];
+       (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+       tbl_tcam_ucast.tbl_ucast_out_port = port_num;
 
-       if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
+       /* config uc vague table */
+       hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+                                  &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+       /* update software entry */
+       soft_mac_entry = priv->soft_mac_tbl;
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+       soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+       /* step back to the START for mc. */
+       soft_mac_entry = priv->soft_mac_tbl;
+
+       /* 2. set promisc multicast vague tcam entry. */
+       entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+       if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+               dev_err(dsaf_dev->dev,
+                       "enable mc promisc failed (port:%#x)\n",
+                       port);
                return;
+       }
+
+       memset(&mask_entry, 0x0, sizeof(mask_entry));
+       memset(&mask_key, 0x0, sizeof(mask_key));
+       memset(&temp_key, 0x0, sizeof(temp_key));
+       mask_entry.addr[0] = 0x01;
+       hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+                            port, mask_entry.addr);
+       tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+       tbl_tcam_mcast.tbl_mcast_old_en = 0;
 
-       /* find the tcam entry index for promisc */
-       entry_index = dsaf_promisc_tcam_entry(port);
-
-       memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-       memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
-       /* config key mask */
-       if (enable) {
-               dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
-                              DSAF_TBL_TCAM_KEY_PORT_M,
-                              DSAF_TBL_TCAM_KEY_PORT_S, port);
-               dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
-                              DSAF_TBL_TCAM_KEY_PORT_M,
-                              DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
-               /* SUB_QID */
-               dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
-                            DSAF_SERVICE_NW_NUM, true);
-               mac_data.tbl_mcast_item_vld = true;     /* item_vld bit */
+       if (port < DSAF_SERVICE_NW_NUM) {
+               mskid = port;
+       } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+               mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
        } else {
-               mac_data.tbl_mcast_item_vld = false;    /* item_vld bit */
+               dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+                       dsaf_dev->ae_dev.name, port,
+                       mask_key.high.val, mask_key.low.val);
+               return;
        }
 
-       dev_dbg(dsaf_dev->dev,
-               "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
-               dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
-               tbl_tcam_data.low.val, entry_index);
+       dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+                    mskid % 32, 1);
+       memcpy(&temp_key, &mask_key, sizeof(mask_key));
+       hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+                                  (struct dsaf_tbl_tcam_data *)(&mask_key),
+                                  &tbl_tcam_mcast);
+
+       /* update software entry */
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = entry_index;
+       soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+       soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
 
-       /* config promisc entry with mask */
-       hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
-                            (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
-                            (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
-                            &mac_data);
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+       struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+       struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+       struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+       struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+       struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+       struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+       struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+       u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+       struct dsaf_drv_tbl_tcam_key mac_key;
+       u8 addr[ETH_ALEN] = {0};
 
-       /* config software entry */
+       /* 1. delete uc vague tcam entry. */
+       /* promisc use vague table match with vlanid = 0 & macaddr = 0 */
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+       if (entry_index == DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* config uc vague table */
+       hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+                                  &tbl_tcam_mask, &tbl_tcam_ucast);
+       /* update soft management table. */
+       soft_mac_entry = priv->soft_mac_tbl;
+       soft_mac_entry += entry_index;
+       soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+       /* step back to the START for mc. */
+       soft_mac_entry = priv->soft_mac_tbl;
+
+       /* 2. delete mc vague tcam entry. */
+       addr[0] = 0x01;
+       memset(&mac_key, 0x0, sizeof(mac_key));
+       hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+       entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+       if (entry_index == DSAF_INVALID_ENTRY_IDX)
+               return;
+
+       /* config mc vague table */
+       hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+                                  &tbl_tcam_mask, &tbl_tcam_mcast);
+       /* update soft management table. */
        soft_mac_entry += entry_index;
-       soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+       soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+                              u32 port, bool enable)
+{
+       if (enable)
+               set_promisc_tcam_enable(dsaf_dev, port);
+       else
+               set_promisc_tcam_disable(dsaf_dev, port);
 }
 
 int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
index 74d935d82cbc6050a287a07532024675ce75254e..b9733b0b848263bc9a25ccf42f3d8b433a7b9e5e 100644 (file)
 #define DSAF_INODE_IN_DATA_STP_DISC_0_REG      0x1A50
 #define DSAF_INODE_GE_FC_EN_0_REG              0x1B00
 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG                0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG                0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG                0x103C
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG      0x1C00
 #define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET   0x100
 #define DSAF_INODE_IN_PRIO_PAUSE_OFFSET                0x50
 #define RCB_ECC_ERR_ADDR4_REG                  0x460
 #define RCB_ECC_ERR_ADDR5_REG                  0x464
 
-#define RCB_COM_SF_CFG_INTMASK_RING            0x480
-#define RCB_COM_SF_CFG_RING_STS                        0x484
-#define RCB_COM_SF_CFG_RING                    0x488
-#define RCB_COM_SF_CFG_INTMASK_BD              0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS             0x470
+#define RCB_COM_SF_CFG_INTMASK_RING            0x470
+#define RCB_COM_SF_CFG_RING_STS                        0x474
+#define RCB_COM_SF_CFG_RING                    0x478
+#define RCB_COM_SF_CFG_INTMASK_BD              0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS             0x480
 #define RCB_COM_RCB_RD_BD_BUSY                 0x490
 #define RCB_COM_RCB_FBD_CRT_EN                 0x494
 #define RCB_COM_AXI_WR_ERR_INTMASK             0x498
 #define GMAC_LD_LINK_COUNTER_REG               0x01D0UL
 #define GMAC_LOOP_REG                          0x01DCUL
 #define GMAC_RECV_CONTROL_REG                  0x01E0UL
+#define GMAC_PCS_RX_EN_REG                     0x01E4UL
 #define GMAC_VLAN_CODE_REG                     0x01E8UL
 #define GMAC_RX_OVERRUN_CNT_REG                        0x01ECUL
 #define GMAC_RX_LENGTHFIELD_ERR_CNT_REG                0x01F4UL
index 28e907831b0eddbf760e0edb579ae7ae708520e0..6242249c9f4c544450d17808939e9f4868efee84 100644 (file)
@@ -1186,6 +1186,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
        if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
                phy_dev->autoneg = false;
 
+       if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+               phy_stop(phy_dev);
+
        return 0;
 }
 
@@ -1281,6 +1284,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
        return cpu;
 }
 
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < q_num * 2; i++) {
+               if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+                       irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+                                             NULL);
+                       free_irq(priv->ring_data[i].ring->irq,
+                                &priv->ring_data[i]);
+                       priv->ring_data[i].ring->irq_init_flag =
+                               RCB_IRQ_NOT_INITED;
+               }
+       }
+}
+
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
 {
        struct hnae_handle *h = priv->ae_handle;
@@ -1306,7 +1325,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
                if (ret) {
                        netdev_err(priv->netdev, "request irq(%d) fail\n",
                                   rd->ring->irq);
-                       return ret;
+                       goto out_free_irq;
                }
                disable_irq(rd->ring->irq);
 
@@ -1321,6 +1340,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
        }
 
        return 0;
+
+out_free_irq:
+       hns_nic_free_irq(h->q_num, priv);
+       return ret;
 }
 
 static int hns_nic_net_up(struct net_device *ndev)
@@ -1330,6 +1353,9 @@ static int hns_nic_net_up(struct net_device *ndev)
        int i, j;
        int ret;
 
+       if (!test_bit(NIC_STATE_DOWN, &priv->state))
+               return 0;
+
        ret = hns_nic_init_irq(priv);
        if (ret != 0) {
                netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1365,6 +1391,7 @@ out_has_some_queues:
        for (j = i - 1; j >= 0; j--)
                hns_nic_ring_close(ndev, j);
 
+       hns_nic_free_irq(h->q_num, priv);
        set_bit(NIC_STATE_DOWN, &priv->state);
 
        return ret;
@@ -1482,11 +1509,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
 }
 
 static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
 static void hns_nic_net_timeout(struct net_device *ndev)
 {
        struct hns_nic_priv *priv = netdev_priv(ndev);
 
-       hns_tx_timeout_reset(priv);
+       if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+               ndev->watchdog_timeo *= 2;
+               netdev_info(ndev, "watchdog_timo changed to %d.\n",
+                           ndev->watchdog_timeo);
+       } else {
+               ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+               hns_tx_timeout_reset(priv);
+       }
 }
 
 static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2049,11 +2084,11 @@ static void hns_nic_service_task(struct work_struct *work)
                = container_of(work, struct hns_nic_priv, service_task);
        struct hnae_handle *h = priv->ae_handle;
 
+       hns_nic_reset_subtask(priv);
        hns_nic_update_link_status(priv->netdev);
        h->dev->ops->update_led_status(h);
        hns_nic_update_stats(priv->netdev);
 
-       hns_nic_reset_subtask(priv);
        hns_nic_service_event_complete(priv);
 }
 
@@ -2339,7 +2374,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
        ndev->min_mtu = MAC_MIN_MTU;
        switch (priv->enet_ver) {
        case AE_VERSION_2:
-               ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+               ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                        NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
index ed50b8dee44f3a8699ca0a226d81f482cdd0f3c0..67cc6d9c8fd7257af3e3c592fd96a636cc8be326 100644 (file)
@@ -1939,8 +1939,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
 {
        struct ibmvnic_rwi *rwi;
+       unsigned long flags;
 
-       mutex_lock(&adapter->rwi_lock);
+       spin_lock_irqsave(&adapter->rwi_lock, flags);
 
        if (!list_empty(&adapter->rwi_list)) {
                rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1951,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
                rwi = NULL;
        }
 
-       mutex_unlock(&adapter->rwi_lock);
+       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        return rwi;
 }
 
@@ -2025,6 +2026,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        struct list_head *entry, *tmp_entry;
        struct ibmvnic_rwi *rwi, *tmp;
        struct net_device *netdev = adapter->netdev;
+       unsigned long flags;
        int ret;
 
        if (adapter->state == VNIC_REMOVING ||
@@ -2041,21 +2043,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
                goto err;
        }
 
-       mutex_lock(&adapter->rwi_lock);
+       spin_lock_irqsave(&adapter->rwi_lock, flags);
 
        list_for_each(entry, &adapter->rwi_list) {
                tmp = list_entry(entry, struct ibmvnic_rwi, list);
                if (tmp->reset_reason == reason) {
                        netdev_dbg(netdev, "Skipping matching reset\n");
-                       mutex_unlock(&adapter->rwi_lock);
+                       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                        ret = EBUSY;
                        goto err;
                }
        }
 
-       rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+       rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
        if (!rwi) {
-               mutex_unlock(&adapter->rwi_lock);
+               spin_unlock_irqrestore(&adapter->rwi_lock, flags);
                ibmvnic_close(netdev);
                ret = ENOMEM;
                goto err;
@@ -2069,7 +2071,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
        }
        rwi->reset_reason = reason;
        list_add_tail(&rwi->list, &adapter->rwi_list);
-       mutex_unlock(&adapter->rwi_lock);
+       spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        adapter->resetting = true;
        netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
        schedule_work(&adapter->ibmvnic_reset);
@@ -4759,7 +4761,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
-       mutex_init(&adapter->rwi_lock);
+       spin_lock_init(&adapter->rwi_lock);
        adapter->resetting = false;
 
        adapter->mac_change_pending = false;
index 99c4f8d331ce7c489c4b3badb1d5fbbe57ff8dea..f2018dbebfa527684a2dd8f91aeedfd1413d7fa1 100644 (file)
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
        struct tasklet_struct tasklet;
        enum vnic_state state;
        enum ibmvnic_reset_reason reset_reason;
-       struct mutex rwi_lock;
+       spinlock_t rwi_lock;
        struct list_head rwi_list;
        struct work_struct ibmvnic_reset;
        bool resetting;
index a3f45335437c3cecde089e44c3a13a3932001960..0e5dc74b4ef229cc6874e9fefce4a6d9f3ffff94 100644 (file)
@@ -1543,17 +1543,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
        /* Copy the address first, so that we avoid a possible race with
-        * .set_rx_mode(). If we copy after changing the address in the filter
-        * list, we might open ourselves to a narrow race window where
-        * .set_rx_mode could delete our dev_addr filter and prevent traffic
-        * from passing.
+        * .set_rx_mode().
+        * - Remove old address from MAC filter
+        * - Copy new address
+        * - Add new address to MAC filter
         */
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_del_mac_filter(vsi, netdev->dev_addr);
-       i40e_add_mac_filter(vsi, addr->sa_data);
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       i40e_add_mac_filter(vsi, netdev->dev_addr);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
 
index aef3c89ee79c4e7384e0713c55b12090c1c36f60..d0a95424ce58eee4cd1510d7815258bc774ae03e 100644 (file)
@@ -1558,24 +1558,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
        return true;
 }
 
-/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring:  rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-void i40e_receive_skb(struct i40e_ring *rx_ring,
-                     struct sk_buff *skb, u16 vlan_tag)
-{
-       struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
-       if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-           (vlan_tag & VLAN_VID_MASK))
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-       napi_gro_receive(&q_vector->napi, skb);
-}
-
 /**
  * i40e_alloc_rx_buffers - Replace used receive buffers
  * @rx_ring: ring to place buffers on
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  * other fields within the skb.
  **/
 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-                            u8 rx_ptype)
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb)
 {
        u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
        u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
        u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
                   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+       u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+                     I40E_RXD_QW1_PTYPE_SHIFT;
 
        if (unlikely(tsynvalid))
                i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
+       if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+               u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      le16_to_cpu(vlan_tag));
+       }
+
        /* modifies the skb - consumes the enet header */
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
                unsigned int size;
-               u16 vlan_tag;
-               u8 rx_ptype;
                u64 qword;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
 
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-
                /* populate checksum, VLAN, and protocol */
-               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+               i40e_process_skb_fields(rx_ring, rx_desc, skb);
 
                i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
+               napi_gro_receive(&rx_ring->q_vector->napi, skb);
                skb = NULL;
 
                /* update budget accounting */
index 09809dffe39931a843b11204d3480c30e1f68554..8af0e99c6c0d89b9e9aa7f59b3c6eaaa8ccdb7fb 100644 (file)
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
        union i40e_rx_desc *rx_desc,
        u64 qw);
 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
-                            union i40e_rx_desc *rx_desc, struct sk_buff *skb,
-                            u8 rx_ptype);
-void i40e_receive_skb(struct i40e_ring *rx_ring,
-                     struct sk_buff *skb, u16 vlan_tag);
+                            union i40e_rx_desc *rx_desc, struct sk_buff *skb);
 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
                          unsigned int total_rx_bytes,
index 433c8e688c78d5623e65fe5edefeae4cc22d67a8..870cf654e4364480e41887ec0e0d054a05f4e25a 100644 (file)
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                struct i40e_rx_buffer *bi;
                union i40e_rx_desc *rx_desc;
                unsigned int size;
-               u16 vlan_tag;
-               u8 rx_ptype;
                u64 qword;
 
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-               rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
-                          I40E_RXD_QW1_PTYPE_SHIFT;
-               i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
-               vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
-                          le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
-               i40e_receive_skb(rx_ring, skb, vlan_tag);
+               i40e_process_skb_fields(rx_ring, rx_desc, skb);
+               napi_gro_receive(&rx_ring->q_vector->napi, skb);
        }
 
        i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
index 5dacfc870259881f8746a72546f5c410f4bf06f6..345701af7749c2e983813b76589baf7e79202fc5 100644 (file)
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
        u8 num_tcs = adapter->hw_tcs;
        u32 reg_val;
        u32 queue;
-       u32 word;
 
        /* remove VLAN filters beloning to this VF */
        ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                }
        }
 
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 word;
+
        /* Clear VF's mailbox memory */
        for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
        /* reset the filters for the device */
        ixgbe_vf_reset_event(adapter, vf);
 
+       ixgbe_vf_clear_mbx(adapter, vf);
+
        /* set vf mac address */
        if (!is_zero_ether_addr(vf_mac))
                ixgbe_set_vf_mac(adapter, vf, vf_mac);
index e5397c8197b9c3713c48e925dad0dfcee732c0b9..61b23497f83692fc164dd6ace1e0948b6eece467 100644 (file)
@@ -408,7 +408,6 @@ struct mvneta_port {
        struct mvneta_pcpu_stats __percpu       *stats;
 
        int pkt_size;
-       unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
        if (!pp->bm_priv) {
                /* Set Offset */
                mvneta_rxq_offset_set(pp, rxq, 0);
-               mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+               mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+                                       PAGE_SIZE :
+                                       MVNETA_RX_BUF_SIZE(pp->pkt_size));
                mvneta_rxq_bm_disable(pp, rxq);
                mvneta_rxq_fill(pp, rxq, rxq->size);
        } else {
@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
        int ret;
 
        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
-       pp->frag_size = PAGE_SIZE;
 
        ret = mvneta_setup_rxqs(pp);
        if (ret)
index 125ea99418df6915da4c5ad9427802e0aeb738f9..f1dab0b55769974a193684182e12a499fee9706e 100644 (file)
@@ -4405,12 +4405,15 @@ static void mvpp2_phylink_validate(struct net_device *dev,
        case PHY_INTERFACE_MODE_10GKR:
        case PHY_INTERFACE_MODE_XAUI:
        case PHY_INTERFACE_MODE_NA:
-               phylink_set(mask, 10000baseCR_Full);
-               phylink_set(mask, 10000baseSR_Full);
-               phylink_set(mask, 10000baseLR_Full);
-               phylink_set(mask, 10000baseLRM_Full);
-               phylink_set(mask, 10000baseER_Full);
-               phylink_set(mask, 10000baseKR_Full);
+               if (port->gop_id == 0) {
+                       phylink_set(mask, 10000baseT_Full);
+                       phylink_set(mask, 10000baseCR_Full);
+                       phylink_set(mask, 10000baseSR_Full);
+                       phylink_set(mask, 10000baseLR_Full);
+                       phylink_set(mask, 10000baseLRM_Full);
+                       phylink_set(mask, 10000baseER_Full);
+                       phylink_set(mask, 10000baseKR_Full);
+               }
                /* Fall-through */
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -4421,7 +4424,6 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 10baseT_Full);
                phylink_set(mask, 100baseT_Half);
                phylink_set(mask, 100baseT_Full);
-               phylink_set(mask, 10000baseT_Full);
                /* Fall-through */
        case PHY_INTERFACE_MODE_1000BASEX:
        case PHY_INTERFACE_MODE_2500BASEX:
index 25c1c4f96841244336c3257abbd213707655ad51..f480763dcd0db1f16d0fcb1da7f61172dd8a5bf7 100644 (file)
@@ -1190,11 +1190,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
                              struct ethtool_ts_info *info)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
-       int ret;
-
-       ret = ethtool_op_get_ts_info(priv->netdev, info);
-       if (ret)
-               return ret;
 
        info->phc_index = mlx5_clock_get_ptp_index(mdev);
 
@@ -1202,9 +1197,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
            info->phc_index == -1)
                return 0;
 
-       info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
-                                SOF_TIMESTAMPING_RX_HARDWARE |
-                                SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
 
        info->tx_types = BIT(HWTSTAMP_TX_OFF) |
                         BIT(HWTSTAMP_TX_ON);
index 871313d6b34d1b315e6ef1a9c07cba396de14186..b70cb6fd164c4fb23f93653e17b08c3fb0d8204d 100644 (file)
@@ -128,6 +128,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
        return !params->lro_en && frag_sz <= PAGE_SIZE;
 }
 
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+                                         MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
                                         struct mlx5e_params *params)
 {
@@ -138,6 +140,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
        if (!mlx5e_rx_is_linear_skb(mdev, params))
                return false;
 
+       if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+               return false;
+
        if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
                return true;
 
@@ -1396,6 +1401,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
        struct mlx5_core_dev *mdev = c->mdev;
        struct mlx5_rate_limit rl = {0};
 
+       cancel_work_sync(&sq->dim.work);
        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit) {
                rl.rate = sq->rate_limit;
index c3c657548824117f1732f3c7d14158c9a2a8e2a0..820fe85100b08dffd406a863f2e1f50f7879cfda 100644 (file)
@@ -46,6 +46,7 @@
 
 #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
 
 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
 
@@ -466,8 +467,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        ASSERT_RTNL();
 
-       if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
-           !ether_addr_equal(e->h_dest, ha))
+       if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+           (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e);
 
        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
@@ -1083,9 +1084,7 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
        if (err)
                return err;
 
-
-       priv->channels.params.num_channels =
-                               mlx5e_get_netdev_max_channels(netdev);
+       priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
 
        mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);
index 624eed345b5d2b19fa5ed54935667b41090383f8..0b5ef6d4e81586fab515de64843a44465ec757fa 100644 (file)
@@ -1190,7 +1190,7 @@ mpwrq_cqe_out:
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-       struct mlx5e_xdpsq *xdpsq;
+       struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
        struct mlx5_cqe64 *cqe;
        int work_done = 0;
 
@@ -1201,10 +1201,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
        cqe = mlx5_cqwq_get_cqe(&cq->wq);
-       if (!cqe)
+       if (!cqe) {
+               if (unlikely(work_done))
+                       goto out;
                return 0;
-
-       xdpsq = &rq->xdpsq;
+       }
 
        do {
                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1219,6 +1220,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
                rq->handle_rx_cqe(rq, cqe);
        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
+out:
        if (xdpsq->doorbell) {
                mlx5e_xmit_xdp_doorbell(xdpsq);
                xdpsq->doorbell = false;
index 3e99d0728b2f2c5366a13f01400d4354d3c80b2c..4337afd610d78daba92068b16669a586aea65942 100644 (file)
@@ -74,7 +74,6 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -198,7 +197,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_nop               += sq_stats->nop;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
-                       s->tx_udp_seg_rem       += sq_stats->udp_seg_rem;
                        s->tx_queue_dropped     += sq_stats->dropped;
                        s->tx_cqe_err           += sq_stats->cqe_err;
                        s->tx_recover           += sq_stats->recover;
index 3f8e870ef4c903bbca01e894bc2608bbb8ead182..3ff69ddae2d3172fd7872bbbce681b9179a3c9f7 100644 (file)
@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
        u64 tx_recover;
        u64 tx_cqes;
        u64 tx_queue_wake;
-       u64 tx_udp_seg_rem;
        u64 tx_cqe_err;
        u64 tx_xdp_xmit;
        u64 tx_xdp_full;
@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
        u64 csum_partial_inner;
        u64 added_vlan_packets;
        u64 nop;
-       u64 udp_seg_rem;
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_ooo;
        u64 tls_resync_bytes;
index fca6f4132c91a51ac2a03eaaf64b0bbc5b6ff2c3..9dabe9d4b2798bc0b41f77761b8fb1126279ea39 100644 (file)
@@ -870,9 +870,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        struct mlx5_flow_handle *rule;
 
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
-       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-       slow_attr->mirror_count = 0,
-       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       slow_attr->mirror_count = 0;
+       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
 
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
@@ -887,6 +887,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
                                  struct mlx5_esw_flow_attr *slow_attr)
 {
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       slow_attr->mirror_count = 0;
+       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow->flags &= ~MLX5E_TC_FLOW_SLOW;
 }
@@ -907,11 +910,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        struct mlx5e_priv *out_priv;
        int err = 0, encap_err = 0;
 
-       /* if prios are not supported, keep the old behaviour of using same prio
-        * for all offloaded rules.
-        */
-       if (!mlx5_eswitch_prios_supported(esw))
-               attr->prio = 1;
+       if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+               NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+               return -EOPNOTSUPP;
+       }
 
        if (attr->chain > max_chain) {
                NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
@@ -1094,10 +1096,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                flow->rule[0] = rule;
        }
 
-       if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
-               e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
-               mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
-       }
+       /* we know that the encap is valid */
+       e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+       mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
 }
 
 static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -2966,8 +2967,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
                                return -EOPNOTSUPP;
                        }
-                       action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                                 MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                       action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        attr->dest_chain = dest_chain;
 
                        continue;
@@ -2980,6 +2980,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
+       if (attr->dest_chain) {
+               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+                       NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+                       return -EOPNOTSUPP;
+               }
+               attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       }
+
        if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "current firmware doesn't support split rule for port mirroring");
index 9d73eb955f75e0c4e19047aae6cf52d9457e6c64..08233cf44871b877679e8ac9273fd1f6d10c587b 100644 (file)
@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)
 
        if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
            --fte->dests_size) {
-               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
                update_fte = true;
        }
 out:
index 30f751e696980d727a86200e1748adda13bb8a22..f7154f358f2766b450cd81bd5c431f1dcd1c9f8d 100644 (file)
@@ -81,6 +81,7 @@ struct mlxsw_core {
        struct mlxsw_core_port *ports;
        unsigned int max_ports;
        bool reload_fail;
+       bool fw_flash_in_progress;
        unsigned long driver_priv[0];
        /* driver_priv has to be always the last item */
 };
@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
        struct rcu_head rcu;
 };
 
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS  3000
+#define MLXSW_EMAD_TIMEOUT_MS                  200
 
 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
 {
        unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
 
+       if (trans->core->fw_flash_in_progress)
+               timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
        queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
 }
 
@@ -1854,6 +1859,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 }
 EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
 static int __init mlxsw_core_module_init(void)
 {
        int err;
index c35be477856f18d6493c4a8c1c6d14e0ef2f2d1b..c4e4971764e54efc101130c87af817f16c7b6106 100644 (file)
@@ -292,6 +292,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                             u64 *p_single_size, u64 *p_double_size,
                             u64 *p_linear_size);
 
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
                          enum mlxsw_res_id res_id);
 
index 9bec940330a450856d2dba23ed7274321cf82059..f84b9c02fcc5eea8a0a1806831c4b559b31ea4d8 100644 (file)
@@ -309,8 +309,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
                },
                .mlxsw_sp = mlxsw_sp
        };
+       int err;
+
+       mlxsw_core_fw_flash_start(mlxsw_sp->core);
+       err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+       mlxsw_core_fw_flash_end(mlxsw_sp->core);
 
-       return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+       return err;
 }
 
 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -3521,6 +3526,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
        /* NVE traps */
        MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
+       MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
 };
 
 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
index 5c13674439f1f0751a369a3112d19bea46a2464c..b5b54b41349a865ae699a80ef1d2c04774633244 100644 (file)
@@ -977,6 +977,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
 {
        WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
        rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
-       mlxsw_sp->nve = NULL;
        kfree(mlxsw_sp->nve);
+       mlxsw_sp->nve = NULL;
 }
index 6f18f4d3322a6f28f14bf774a269fe8c52411680..451216dd7f6bbc1025ff26b71f426fd76c40d4f8 100644 (file)
@@ -60,6 +60,7 @@ enum {
        MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
        MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
        MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+       MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
        MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
        MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
        MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
index e8ca98c070f68443c6460ecb10d3eb4b3ee9a2f2..20c9377e99cb227f863c3a113014496450b4f1cb 100644 (file)
@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
        u32 mac_addr_hi = 0;
        u32 mac_addr_lo = 0;
        u32 data;
-       int ret;
 
        netdev = adapter->netdev;
-       lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
-       ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
-                                      0, 1000, 20000, 100);
-       if (ret)
-               return ret;
 
        /* setup auto duplex, and speed detection */
        data = lan743x_csr_read(adapter, MAC_CR);
@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
        snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
                 "pci-%s", pci_name(adapter->pdev));
 
-       /* set to internal PHY id */
-       adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+       if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+               /* LAN7430 uses internal phy at address 1 */
+               adapter->mdiobus->phy_mask = ~(u32)BIT(1);
 
        /* register mdiobus */
        ret = mdiobus_register(adapter->mdiobus);
index 4c1fb7e578889ac5ea755890e7ff4662259a157d..7cde387e5ec62a0c36f070a163a6e5b9c38a6a4b 100644 (file)
@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
        struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
        struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
        struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-       u64 data0, data1 = 0, steer_ctrl = 0;
+       u64 data0 = 0, data1 = 0, steer_ctrl = 0;
        enum vxge_hw_status status;
 
        status = vxge_hw_vpath_fw_api(vpath,
index 2f49eb75f3cce3245b7162a2ee3cf664bc0ab7f1..67e576fe7fc0f1ad9ecfc043383780b31250022a 100644 (file)
@@ -345,13 +345,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
                        return -EOPNOTSUPP;
 
-               /* We need to store TCP flags in the IPv4 key space, thus
-                * we need to ensure we include a IPv4 key layer if we have
-                * not done so already.
+               /* We need to store TCP flags in the either the IPv4 or IPv6 key
+                * space, thus we need to ensure we include a IPv4/IPv6 key
+                * layer if we have not done so already.
                 */
-               if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
-                       key_layer |= NFP_FLOWER_LAYER_IPV4;
-                       key_size += sizeof(struct nfp_flower_ipv4);
+               if (!key_basic)
+                       return -EOPNOTSUPP;
+
+               if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+                   !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+                       switch (key_basic->n_proto) {
+                       case cpu_to_be16(ETH_P_IP):
+                               key_layer |= NFP_FLOWER_LAYER_IPV4;
+                               key_size += sizeof(struct nfp_flower_ipv4);
+                               break;
+
+                       case cpu_to_be16(ETH_P_IPV6):
+                               key_layer |= NFP_FLOWER_LAYER_IPV6;
+                               key_size += sizeof(struct nfp_flower_ipv6);
+                               break;
+
+                       default:
+                               return -EOPNOTSUPP;
+                       }
                }
        }
 
index 052b3d2c07a1222b7f902477f30d8eef5a75ab22..c662c6f5bee340f4a0b73e09ae36c7eb4bc878d3 100644 (file)
@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
 {
        struct w90p910_ether *ether = netdev_priv(dev);
        struct platform_device *pdev;
index 5c221ebaa7b34fa863f6f424d22c82cf4d54514d..b38e12c9de9d3a883ef79b30c5d9fb912503e786 100644 (file)
@@ -12831,8 +12831,9 @@ enum MFW_DRV_MSG_TYPE {
        MFW_DRV_MSG_BW_UPDATE10,
        MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
        MFW_DRV_MSG_BW_UPDATE11,
-       MFW_DRV_MSG_OEM_CFG_UPDATE,
+       MFW_DRV_MSG_RESERVED,
        MFW_DRV_MSG_GET_TLV_REQ,
+       MFW_DRV_MSG_OEM_CFG_UPDATE,
        MFW_DRV_MSG_MAX
 };
 
index aa633381aa47ea3bc13de4829bde316448ee0d6b..c6f4bab67a5fc30e28e22c2f4b0bcb391667730b 100644 (file)
@@ -2496,6 +2496,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
                if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
                        DP_NOTICE(cdev,
                                  "Unable to map frag - dropping packet\n");
+                       rc = -ENOMEM;
                        goto err;
                }
 
index 1fd01688d37bdd9c7e09e2444034c0ccb286ea19..209566f8097baa29d2b2b21129219c49c51242b0 100644 (file)
@@ -6469,7 +6469,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                goto out;
        }
 
-       if (status & LinkChg)
+       if (status & LinkChg && tp->dev->phydev)
                phy_mac_interrupt(tp->dev->phydev);
 
        if (unlikely(status & RxFIFOOver &&
index 5551fead8f6646f327dcfc7d5b98d8f3483f3eeb..c4a35e932f05216a6a579d6e7a62bcf81a98ae79 100644 (file)
@@ -4250,6 +4250,7 @@ int stmmac_dvr_probe(struct device *device,
        priv->wq = create_singlethread_workqueue("stmmac_wq");
        if (!priv->wq) {
                dev_err(priv->device, "failed to create workqueue\n");
+               ret = -ENOMEM;
                goto error_wq;
        }
 
index 0ff5a403a8dc356a359fb085be26379ca011b67b..b2ff903a9cb6e56a47814be2559589b73325302a 100644 (file)
@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
 static void ca8210_rx_done(struct cas_control *cas_ctl)
 {
        u8 *buf;
-       u8 len;
+       unsigned int len;
        struct work_priv_container *mlme_reset_wpc;
        struct ca8210_priv *priv = cas_ctl->priv;
 
@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
        if (len > CA8210_SPI_BUF_SIZE) {
                dev_crit(
                        &priv->spi->dev,
-                       "Received packet len (%d) erroneously long\n",
+                       "Received packet len (%u) erroneously long\n",
                        len
                );
                goto finish;
index 51b5198d5943422bc61e2201e830176d733d006b..b6743f03dce000578b65bf9a8afddd3c2613d628 100644 (file)
@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
-       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
                             info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
                             hwsim_edge_policy, NULL))
                return -EINVAL;
@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
            !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
                return -EINVAL;
 
-       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+       if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
                             info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
                             hwsim_edge_policy, NULL))
                return -EINVAL;
index 18e92c19c5ab8716f6a87e905689a2b0c16b56d4..26c41ede54a4f4fde7a0b009d458bd145fb49f4d 100644 (file)
@@ -308,11 +308,8 @@ static int mdio_bus_phy_restore(struct device *dev)
        if (ret < 0)
                return ret;
 
-       /* The PHY needs to renegotiate. */
-       phydev->link = 0;
-       phydev->state = PHY_UP;
-
-       phy_start_machine(phydev);
+       if (phydev->attached_dev && phydev->adjust_link)
+               phy_start_machine(phydev);
 
        return 0;
 }
index 184c24baca1527333d92ec927c48e958ad6c95f0..d6916f787fce98ae2ce4d51d0619eb5094738543 100644 (file)
@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
                return -EIO;
        }
 
+       /* check if we have a valid interface */
+       if (if_num > 16) {
+               kfree(config_data);
+               return -EINVAL;
+       }
+
        switch (config_data[if_num]) {
        case 0x0:
                result = 0;
@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
 
        /* Get the interface/port specification from either driver_info or from
         * the device itself */
-       if (id->driver_info)
+       if (id->driver_info) {
+               /* if_num is controlled by the device, driver_info is a 0 terminated
+                * array. Make sure, the access is in bounds! */
+               for (i = 0; i <= if_num; ++i)
+                       if (((u32 *)(id->driver_info))[i] == 0)
+                               goto exit;
                port_spec = ((u32 *)(id->driver_info))[if_num];
-       else
+       } else {
                port_spec = hso_get_config_data(interface);
+               if (port_spec < 0)
+                       goto exit;
+       }
 
        /* Check if we need to switch to alt interfaces prior to port
         * configuration */
index be1917be28f2d457c561a117ea92dd49c7f62d50..77d3c85febf18f801a1dc234bf5c1063c264f472 100644 (file)
@@ -2320,6 +2320,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
        ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
        ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
 
+       /* Added to support MAC address changes */
+       ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+       ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
        return 0;
 }
 
index 72a55b6b421184c4fb69411ba3d0150e6c337a88..c8872dd5ff5e1808f36352bf618736ed20068e91 100644 (file)
@@ -1117,6 +1117,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1435, 0xd181, 4)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd181, 5)},    /* Wistron NeWeb D18Q1 */
        {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
+       {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
        {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
        {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
@@ -1229,6 +1230,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
        {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},    /* Telewell TW-3G HSPA+ */
        {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)},    /* Telewell TW-3G HSPA+ */
        {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
index f1b5201cc32075da27cf14d94b781c9f58c16189..60dd1ec1665f992ea9b50551672d4a85a5b6b670 100644 (file)
 #define USB_UPS_CTRL           0xd800
 #define USB_POWER_CUT          0xd80a
 #define USB_MISC_0             0xd81a
+#define USB_MISC_1             0xd81f
 #define USB_AFE_CTRL2          0xd824
 #define USB_UPS_CFG            0xd842
 #define USB_UPS_FLAGS          0xd848
@@ -555,6 +556,7 @@ enum spd_duplex {
 
 /* MAC PASSTHRU */
 #define AD_MASK                        0xfee0
+#define BND_MASK               0x0004
 #define EFUSE                  0xcfdb
 #define PASS_THRU_MASK         0x1
 
@@ -1150,7 +1152,7 @@ out1:
        return ret;
 }
 
-/* Devices containing RTL8153-AD can support a persistent
+/* Devices containing proper chips can support a persistent
  * host system provided MAC address.
  * Examples of this are Dell TB15 and Dell WD15 docks
  */
@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
 
        /* test for -AD variant of RTL8153 */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
-       if ((ocp_data & AD_MASK) != 0x1000)
-               return -ENODEV;
-
-       /* test for MAC address pass-through bit */
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
-       if ((ocp_data & PASS_THRU_MASK) != 1)
-               return -ENODEV;
+       if ((ocp_data & AD_MASK) == 0x1000) {
+               /* test for MAC address pass-through bit */
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+               if ((ocp_data & PASS_THRU_MASK) != 1) {
+                       netif_dbg(tp, probe, tp->netdev,
+                                 "No efuse for RTL8153-AD MAC pass through\n");
+                       return -ENODEV;
+               }
+       } else {
+               /* test for RTL8153-BND */
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+               if ((ocp_data & BND_MASK) == 0) {
+                       netif_dbg(tp, probe, tp->netdev,
+                                 "Invalid variant for MAC pass through\n");
+                       return -ENODEV;
+               }
+       }
 
        /* returns _AUXMAC_#AABBCCDDEEFF# */
        status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
        if (tp->version == RTL_VER_01) {
                ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
        } else {
-               /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
-                * or system doesn't provide valid _SB.AMAC this will be
-                * be expected to non-zero
+               /* if device doesn't support MAC pass through this will
+                * be expected to be non-zero
                 */
                ret = vendor_mac_passthru_addr_read(tp, &sa);
                if (ret < 0)
index 297cdeaef4796501279f826154191eb6e0e0d298..0565f8880199d217ad63c574f058bf1b724524f5 100644 (file)
@@ -568,6 +568,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
        rd->remote_port = port;
        rd->remote_vni = vni;
        rd->remote_ifindex = ifindex;
+       rd->offloaded = false;
        return 1;
 }
 
@@ -3258,6 +3259,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f = NULL;
+       bool unregister = false;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3283,12 +3285,11 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        err = register_netdevice(dev);
        if (err)
                goto errout;
+       unregister = true;
 
        err = rtnl_configure_link(dev, NULL);
-       if (err) {
-               unregister_netdevice(dev);
+       if (err)
                goto errout;
-       }
 
        /* notify default fdb entry */
        if (f)
@@ -3296,9 +3297,16 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+
 errout:
+       /* unregister_netdevice() destroys the default FDB entry with deletion
+        * notification. But the addition notification was not sent yet, so
+        * destroy the entry by hand here.
+        */
        if (f)
                vxlan_fdb_destroy(vxlan, f, false);
+       if (unregister)
+               unregister_netdevice(dev);
        return err;
 }
 
@@ -3534,7 +3542,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct vxlan_rdst *dst = &vxlan->default_dst;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
-       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3560,19 +3567,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                                           old_dst.remote_ifindex, 0);
 
                if (!vxlan_addr_any(&dst->remote_ip)) {
-                       err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                       err = vxlan_fdb_update(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
+                                              NLM_F_APPEND | NLM_F_CREATE,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF, &f);
+                                              NTF_SELF);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
-                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
index da607febfd8200fffbdd5cfa575866a7f99cd04b..d210b0ed59beb606bdef977ae0bec7babd00f212 100644 (file)
@@ -2418,6 +2418,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+       struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+       /* all 10.x firmware versions support thermal throttling but don't
+        * advertise the support via service flags so we have to hardcode
+        * it here
+        */
+       switch (fw_file->wmi_op_version) {
+       case ATH10K_FW_WMI_OP_VERSION_10_1:
+       case ATH10K_FW_WMI_OP_VERSION_10_2:
+       case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                      const struct ath10k_fw_components *fw)
 {
@@ -2617,6 +2639,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
                goto err_hif_stop;
        }
 
+       status = ath10k_core_compat_services(ar);
+       if (status) {
+               ath10k_err(ar, "compat services failed: %d\n", status);
+               goto err_hif_stop;
+       }
+
        /* Some firmware revisions do not properly set up hardware rx filter
         * registers.
         *
index 15964b374f68da69636e256d6a3911cc3eee773b..02988fc378a1581844645661349bdfdfcf3d3993 100644 (file)
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
                            &fops_pktlog_filter);
 
-       debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
-                           &fops_quiet_period);
+       if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+                                   &fops_quiet_period);
 
        debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
                            &fops_tpc_stats);
index aa8978a8d7514aa5f827ff6de9d4465ffee9a334..fe35edcd3ec8104005310f5c831218493d990754 100644 (file)
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return;
+
        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return;
 
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
        struct device *hwmon_dev;
        int ret;
 
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return 0;
+
        cdev = thermal_cooling_device_register("ath10k_thermal", ar,
                                               &ath10k_thermal_ops);
 
@@ -216,6 +222,9 @@ err_cooling_destroy:
 
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
+       if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+               return;
+
        sysfs_remove_link(&ar->dev->kobj, "cooling_device");
        thermal_cooling_device_unregister(ar->thermal.cdev);
 }
index 92c25f51bf868fd8daa4d93b0d7e4b888125ffc2..c2cb413392eeb350fd146ab585178b38768eeecf 100644 (file)
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
        SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
               WMI_SERVICE_SPOOF_MAC_SUPPORT,
               WMI_TLV_MAX_SERVICE);
+       SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+              WMI_SERVICE_THERM_THROT,
+              WMI_TLV_MAX_SERVICE);
 }
 
 #undef SVCMAP
index f7badd079051d35601ca1fa6400601d4109604ad..c5a343c93013132d613acae897a5511339c1b94a 100644 (file)
@@ -205,6 +205,7 @@ enum wmi_service {
        WMI_SERVICE_SPOOF_MAC_SUPPORT,
        WMI_SERVICE_TX_DATA_ACK_RSSI,
        WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+       WMI_SERVICE_THERM_THROT,
 
        /* keep last */
        WMI_SERVICE_MAX,
index 2ba890445c356502502a5d948bb2fdb0a21042bf..1689bead1b4fd3f872f41ff073653d1d896740dd 100644 (file)
@@ -881,6 +881,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        int ret, i, j;
        u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
+       /*
+        * This command is not supported on earlier firmware versions.
+        * Unfortunately, we don't have a TLV API flag to rely on, so
+        * rely on the major version which is in the first byte of
+        * ucode_ver.
+        */
+       if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+               return 0;
+
        ret = iwl_mvm_sar_get_wgds_table(mvm);
        if (ret < 0) {
                IWL_DEBUG_RADIO(mvm,
index e2addd8b878b290bbcb6656f6baeedb258400964..5d75c971004b4e480737aa0fe13627afb1e58cb7 100644 (file)
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
                                "Send delba to tid=%d, %pM\n",
                                tid, rx_reor_tbl_ptr->ta);
                        mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
-                       return;
+                       goto exit;
                }
        }
+exit:
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
index 8e63d14c1e1c57b4a93d430f5174c245f6a69b08..5380fba652cc49ff2a2aef2528b35e674b8f1a4a 100644 (file)
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
        int pkt_to_send, i;
        void *rx_tmp_ptr;
+       unsigned long flags;
 
        pkt_to_send = (start_win > tbl->start_win) ?
                      min((start_win - tbl->start_win), tbl->win_size) :
                      tbl->win_size;
 
        for (i = 0; i < pkt_to_send; ++i) {
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                rx_tmp_ptr = NULL;
                if (tbl->rx_reorder_ptr[i]) {
                        rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                        tbl->rx_reorder_ptr[i] = NULL;
                }
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                if (rx_tmp_ptr)
                        mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
        }
 
        tbl->start_win = start_win;
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
        int i, j, xchg;
        void *rx_tmp_ptr;
+       unsigned long flags;
 
        for (i = 0; i < tbl->win_size; ++i) {
-               if (!tbl->rx_reorder_ptr[i])
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               if (!tbl->rx_reorder_ptr[i]) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        break;
+               }
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        /*
         * We don't have a circular buffer, hence use rotation to simulate
         * circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                }
        }
        tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 
        del_timer_sync(&tbl->timer_context.timer);
        tbl->timer_context.timer_is_set = false;
+
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_del(&tbl->list);
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
        kfree(tbl->rx_reorder_ptr);
        kfree(tbl);
 
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
        struct mwifiex_rx_reorder_tbl *tbl;
+       unsigned long flags;
 
-       list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
-               if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        return tbl;
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return NULL;
 }
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
                return;
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
-               if (!memcmp(tbl->ta, ta, ETH_ALEN))
+       list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+               if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        mwifiex_del_rx_reorder_entry(priv, tbl);
+                       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+               }
+       }
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
        struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+       struct mwifiex_private *priv = ctx->priv;
+       unsigned long flags;
        int i;
 
-       for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
-               if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+               if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                              flags);
                        return i;
+               }
+       }
+       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        return -1;
 }
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
        struct reorder_tmr_cnxt *ctx =
                from_timer(ctx, t, timer);
        int start_win, seq_num;
-       unsigned long flags;
 
        ctx->timer_is_set = false;
-       spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
        seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-       if (seq_num < 0) {
-               spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+       if (seq_num < 0)
                return;
-       }
 
        mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
        start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
        mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
                                                 start_win);
-       spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
         * If we get a TID, ta pair which is already present dispatch all the
         * the packets and move the window size until the ssn
         */
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (tbl) {
                mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        /* if !tbl then create one */
        new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
        if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
        int prev_start_win, start_win, end_win, win_size;
        u16 pkt_index;
        bool init_window_shift = false;
-       unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                if (pkt_type != PKT_TYPE_BAR)
                        mwifiex_11n_dispatch_pkt(priv, payload);
                return ret;
        }
 
        if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_11n_dispatch_pkt(priv, payload);
                return ret;
        }
@@ -651,8 +666,6 @@ done:
        if (!tbl->timer_context.timer_is_set ||
            prev_start_win != tbl->start_win)
                mwifiex_11n_rxreorder_timer_restart(tbl);
-
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        return ret;
 }
 
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
                    peer_mac, tid, initiator);
 
        if (cleanup_rx_reorder_tbl) {
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                                 peer_mac);
                if (!tbl) {
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
                        mwifiex_dbg(priv->adapter, EVENT,
                                    "event: TID, TA not found in table\n");
                        return;
                }
                mwifiex_del_rx_reorder_entry(priv, tbl);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
        } else {
                ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
                if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
        int tid, win_size;
        struct mwifiex_rx_reorder_tbl *tbl;
        uint16_t block_ack_param_set;
-       unsigned long flags;
 
        block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
                            add_ba_rsp->peer_mac_addr, tid);
 
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                     add_ba_rsp->peer_mac_addr);
                if (tbl)
                        mwifiex_del_rx_reorder_entry(priv, tbl);
 
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return 0;
        }
 
        win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
                    >> BLOCKACKPARAM_WINSIZE_POS;
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                             add_ba_rsp->peer_mac_addr);
        if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                else
                        tbl->amsdu = false;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        mwifiex_dbg(priv->adapter, CMD,
                    "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-                                &priv->rx_reorder_tbl_ptr, list)
+                                &priv->rx_reorder_tbl_ptr, list) {
+               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+       }
        INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
        int tlv_buf_left = len;
        int ret;
        u8 *tmp;
-       unsigned long flags;
 
        mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
                         event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
                            tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
                            tlv_bitmap_len);
 
-               spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                rx_reor_tbl_ptr =
                        mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
                                                       tlv_rxba->mac);
                if (!rx_reor_tbl_ptr) {
-                       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                              flags);
                        mwifiex_dbg(priv->adapter, ERROR,
                                    "Can not find rx_reorder_tbl!");
                        return;
                }
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
                for (i = 0; i < tlv_bitmap_len; i++) {
                        for (j = 0 ; j < 8; j++) {
index a83c5afc256abcb9f3eca164dd84ce323e3b0eaf..5ce85d5727e4b882ebc37372f03bb49003d1a0c9 100644 (file)
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
                spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
        }
 
-       spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        if (!priv->ap_11n_enabled ||
            (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
            (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
                ret = mwifiex_handle_uap_rx_forward(priv, skb);
-               spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                return ret;
        }
-       spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
        /* Reorder and send to kernel */
        pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
index 7cbce03aa65b96c3eee1cfbf75a92879005dcb64..aa426b838ffafeae66aedfbe76f9099a80490310 100644 (file)
@@ -400,7 +400,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
-               struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+               struct mt76_txq *mtxq;
+
+               if (!txq)
+                       continue;
+
+               mtxq = (struct mt76_txq *)txq->drv_priv;
 
                spin_lock_bh(&mtxq->hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
index f4122c8fdd9777e852ac1bc0f01d9cedcde6c84a..ef9b502ce576b04bbcc562313957bd3619f14451 100644 (file)
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 
        if (rtl_c2h_fast_cmd(hw, skb)) {
                rtl_c2h_content_parsing(hw, skb);
+               kfree_skb(skb);
                return;
        }
 
index f17f602e6171203acd39ee448c305cba719ff1de..5b97cc946d70a695cb68e1f3e2f93328c29d8bc2 100644 (file)
@@ -905,7 +905,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-                       BUG_ON(pull_to <= skb_headlen(skb));
+                       BUG_ON(pull_to < skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
index a90a9194ac4a8304df3076cb40dc32d8bc0605c7..fed29de783e00ff554e2e0c8e0e0849bf70d6c35 100644 (file)
@@ -1064,7 +1064,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                .regs           = aer_regs,
        };
 
-       if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+       if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
                                 &aer_recover_ring_lock))
                schedule_work(&aer_recover_work);
        else
index 53d449076dee32bb64cf3f0093b4a7e9016b9fa7..ea87d739f534bdf9ede84639486cf3e41195ae7b 100644 (file)
@@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
                case PIN_CONFIG_BIAS_DISABLE:
                        dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
-                       meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+                       meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+                                              &bit);
                        ret = regmap_update_bits(pc->reg_pullen, reg,
                                                 BIT(bit), 0);
                        if (ret)
index 6838b38555a106ceb1c8e6231598e48c5217e046..1bfb0ae6b387967001b147ff99fc3e63975fafec 100644 (file)
@@ -33,7 +33,7 @@ enum {
        }
 
 
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9)        \
        {                                               \
                .name = "gpio" #id,                     \
                .pins = gpio##id##_pins,                \
@@ -51,11 +51,12 @@ enum {
                        msm_mux_##f9                    \
                },                                      \
                .nfuncs = 10,                           \
-               .ctl_reg = base + REG_SIZE * id,        \
-               .io_reg = base + 0x4 + REG_SIZE * id,           \
-               .intr_cfg_reg = base + 0x8 + REG_SIZE * id,             \
-               .intr_status_reg = base + 0xc + REG_SIZE * id,  \
-               .intr_target_reg = base + 0x8 + REG_SIZE * id,  \
+               .ctl_reg = REG_SIZE * id,               \
+               .io_reg = 0x4 + REG_SIZE * id,          \
+               .intr_cfg_reg = 0x8 + REG_SIZE * id,    \
+               .intr_status_reg = 0xc + REG_SIZE * id, \
+               .intr_target_reg = 0x8 + REG_SIZE * id, \
+               .tile = _tile,                  \
                .mux_bit = 2,                   \
                .pull_bit = 0,                  \
                .drv_bit = 6,                   \
@@ -82,6 +83,7 @@ enum {
                .intr_cfg_reg = 0,                      \
                .intr_status_reg = 0,                   \
                .intr_target_reg = 0,                   \
+               .tile = NORTH,                          \
                .mux_bit = -1,                          \
                .pull_bit = pull,                       \
                .drv_bit = drv,                         \
@@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = {
        PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
        PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
        PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
-       SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
-       SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
-       SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
-       SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
-       SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
-       SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
-       SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+       SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+       SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+       SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+       SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6),
+       SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3),
+       SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0),
+       SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
 };
 
 static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
index 6624499eae72f5c2ba986c8c54c6f7e583f05f2a..4ada80317a3bd56bab5627b7ae947d1d3a82c352 100644 (file)
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),  /* PH_EINT11 */
+                 SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
 };
 
 static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
index cd160f2ec75d3ddd4803904fcd0b14c8742f86e1..bcd30e2374f18e8be5d16d88b117016dd59377df 100644 (file)
@@ -2364,7 +2364,7 @@ static int _bnx2fc_create(struct net_device *netdev,
        if (!interface) {
                printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
                rc = -ENOMEM;
-               goto ifput_err;
+               goto netdev_err;
        }
 
        if (is_vlan_dev(netdev)) {
index b658b9a5eb1e172b6549d05b1e8c3aece913f715..d0ecc729a90a3706b979898a00709d01312e3579 100644 (file)
@@ -4886,10 +4886,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                        fcport->d_id = e->u.new_sess.id;
                        fcport->flags |= FCF_FABRIC_DEVICE;
                        fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
                                fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
                                fcport->fc4_type = FC4_TYPE_OTHER;
                                fcport->fc4f_nvme = FC4_TYPE_NVME;
                        }
index a7a34e89c42db574c51e476f7809408076ef431a..3252efa422f94749abcd6f6f87c1dafd5db04c1d 100644 (file)
@@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS
        depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
        depends on HAS_DMA
        depends on OF
+       depends on MEDIA_CONTROLLER_REQUEST_API
        select SUNXI_SRAM
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index 32adbcbe6175b7fd1a611a529a8e9797f03a5175..07520a2ce179bc4755b2f56771f79369824d97dc 100644 (file)
@@ -255,10 +255,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
 
        res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
        dev->base = devm_ioremap_resource(dev->dev, res);
-       if (!dev->base) {
+       if (IS_ERR(dev->base)) {
                v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
 
-               ret = -ENOMEM;
+               ret = PTR_ERR(dev->base);
                goto err_sram;
        }
 
index c4111a98f1a79570c9eb98e7d0fcf9dbcbf2ec67..2d26ae80e20228e96bcb2e4b4a2958f2dd57bbee 100644 (file)
@@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
        struct platform_device *pdev = data->pdev;
        struct device *dev = &pdev->dev;
 
-       data->nr_sensors = 2;
+       data->nr_sensors = 1;
 
        data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
                                    data->nr_sensors, GFP_KERNEL);
@@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
                        return ret;
                }
 
-               ret = platform_get_irq_byname(pdev, sensor->irq_name);
+               ret = platform_get_irq(pdev, 0);
                if (ret < 0)
                        return ret;
 
index 47623da0f91b031f8155147573785dd911568e9c..bbd73c5a4a4e92f4c3d361f129e7ec7d409a6d94 100644 (file)
@@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
                sensor->t0 = TS1_T0_VAL1;
 
        /* Retrieve fmt0 and put it on Hz */
-       sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
-                                             & TS1_FMT0_MASK;
+       sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
+                                DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
 
        /* Retrieve ramp coefficient */
        sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
@@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
        if (ret)
                return ret;
 
+       ret = stm_thermal_read_factory_settings(sensor);
+       if (ret)
+               goto thermal_unprepare;
+
        ret = stm_thermal_calibration(sensor);
        if (ret)
                goto thermal_unprepare;
@@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
        /* Populate sensor */
        sensor->base = base;
 
-       ret = stm_thermal_read_factory_settings(sensor);
-       if (ret)
-               return ret;
-
        sensor->clk = devm_clk_get(&pdev->dev, "pclk");
        if (IS_ERR(sensor->clk)) {
                dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
index f776b3eafb9619578986f2c0a3b0234fc2842067..3f779d25ec0cdfa10575b153b62dc34c0c5b218b 100644 (file)
@@ -552,30 +552,11 @@ static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
  */
 static void serial8250_clear_fifos(struct uart_8250_port *p)
 {
-       unsigned char fcr;
-       unsigned char clr_mask = UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
-
        if (p->capabilities & UART_CAP_FIFO) {
-               /*
-                * Make sure to avoid changing FCR[7:3] and ENABLE_FIFO bits.
-                * In case ENABLE_FIFO is not set, there is nothing to flush
-                * so just return. Furthermore, on certain implementations of
-                * the 8250 core, the FCR[7:3] bits may only be changed under
-                * specific conditions and changing them if those conditions
-                * are not met can have nasty side effects. One such core is
-                * the 8250-omap present in TI AM335x.
-                */
-               fcr = serial_in(p, UART_FCR);
-
-               /* FIFO is not enabled, there's nothing to clear. */
-               if (!(fcr & UART_FCR_ENABLE_FIFO))
-                       return;
-
-               fcr |= clr_mask;
-               serial_out(p, UART_FCR, fcr);
-
-               fcr &= ~clr_mask;
-               serial_out(p, UART_FCR, fcr);
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+               serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+                              UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+               serial_out(p, UART_FCR, 0);
        }
 }
 
@@ -1467,7 +1448,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
         * Enable previously disabled RX interrupts.
         */
        if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
-               serial8250_clear_fifos(p);
+               serial8250_clear_and_reinit_fifos(p);
 
                p->ier |= UART_IER_RLSI | UART_IER_RDI;
                serial_port_out(&p->port, UART_IER, p->ier);
index c2493d0112257798d3b50660e2ae4e1fbcef37f8..3c5169eb23f5c7069cf1fbd35dc0c929a1c51c58 100644 (file)
@@ -204,9 +204,11 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
        if (atomic_inc_return(&pdata->refcnt) != 1)
                return 0;
 
+       vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
+       vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+
        ret = vmbus_connect_ring(dev->channel,
                                 hv_uio_channel_cb, dev->channel);
-
        if (ret == 0)
                dev->channel->inbound.ring_buffer->interrupt_mask = 1;
        else
@@ -334,9 +336,6 @@ hv_uio_probe(struct hv_device *dev,
                goto fail_close;
        }
 
-       vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
-       vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
-
        ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
        if (ret)
                dev_notice(&dev->device,
index 94aca1b5ac8a228b6ecb690f84441f37c976fbdb..01b5818a4be5dceba4ba7dea73c285867b6fb773 100644 (file)
@@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                portsc_buf[port_index] = 0;
 
                /* Bail out if a USB3 port has a new device in link training */
-               if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+               if ((hcd->speed >= HCD_USB3) &&
+                   (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
                        bus_state->bus_suspended = 0;
                        spin_unlock_irqrestore(&xhci->lock, flags);
                        xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
index c3515bad5dbbad26efcc71f89fdd64cc28911329..011dd45f871815915a3b9a9c09cd80238c0e5430 100644 (file)
@@ -1863,6 +1863,8 @@ struct xhci_hcd {
        unsigned                sw_lpm_support:1;
        /* support xHCI 1.0 spec USB2 hardware LPM */
        unsigned                hw_lpm_support:1;
+       /* Broken Suspend flag for SNPS Suspend resume issue */
+       unsigned                broken_suspend:1;
        /* cached usb2 extened protocol capabilites */
        u32                     *ext_caps;
        unsigned int            num_ext_caps;
@@ -1880,8 +1882,6 @@ struct xhci_hcd {
        void                    *dbc;
        /* platform-specific data -- must come last */
        unsigned long           priv[0] __aligned(sizeof(s64));
-       /* Broken Suspend flag for SNPS Suspend resume issue */
-       u8                      broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
index e24ff16d4147754b382595dc401fa79bf815ac6b..1ce27f3ff7a78771d1c8037de8de604020de0f1f 100644 (file)
@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+       { USB_DEVICE(TELIT_VENDOR_ID, 0x1900),                          /* Telit LN940 (QMI) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),    /* Telit LN940 (MBIM) */
+         .driver_info = NCTRL(0) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) },    /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
          .driver_info = RSVD(4) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(2) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
          .driver_info = RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },    /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
        { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
          .driver_info = RSVD(5) | RSVD(6) },
+       { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) },   /* Simcom SIM7500/SIM7600 MBIM mode */
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
          .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },    /* HP lt2523 (Novatel E371) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) },    /* HP lt4132 (Huawei ME906s-158) */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+       { USB_DEVICE(0x1508, 0x1001),                                           /* Fibocom NL668 */
+         .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index ab11b2bee2739f261790559237f8848cd3eb0174..ad7a6f475a442a9b08843f77708729b3e123317b 100644 (file)
@@ -513,7 +513,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
        struct socket *sock;
        struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
 
-       mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+       /* Try to hold the vq mutex of the paired virtqueue. We can't
+        * use mutex_lock() here since we could not guarantee a
+        * consistenet lock ordering.
+        */
+       if (!mutex_trylock(&vq->mutex))
+               return;
+
        vhost_disable_notify(&net->dev, vq);
        sock = rvq->private_data;
 
index 6b98d8e3a5bf8247784303ce890a990fb8ec1259..55e5aa662ad59d4b72c44db743198876654f2d2d 100644 (file)
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 {
        int i;
 
-       for (i = 0; i < d->nvqs; ++i) {
-               mutex_lock(&d->vqs[i]->mutex);
+       for (i = 0; i < d->nvqs; ++i)
                __vhost_vq_meta_reset(d->vqs[i]);
-               mutex_unlock(&d->vqs[i]->mutex);
-       }
 }
 
 static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 #define vhost_get_used(vq, x, ptr) \
        vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
 
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+       int i = 0;
+       for (i = 0; i < d->nvqs; ++i)
+               mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+       int i = 0;
+       for (i = 0; i < d->nvqs; ++i)
+               mutex_unlock(&d->vqs[i]->mutex);
+}
+
 static int vhost_new_umem_range(struct vhost_umem *umem,
                                u64 start, u64 size, u64 end,
                                u64 userspace_addr, int perm)
@@ -976,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
        int ret = 0;
 
        mutex_lock(&dev->mutex);
+       vhost_dev_lock_vqs(dev);
        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                if (!dev->iotlb) {
@@ -1009,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
                break;
        }
 
+       vhost_dev_unlock_vqs(dev);
        mutex_unlock(&dev->mutex);
 
        return ret;
@@ -2220,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
                return -EFAULT;
        }
        if (unlikely(vq->log_used)) {
+               /* Make sure used idx is seen before log. */
+               smp_wmb();
                /* Log used index update. */
                log_write(vq->log_base,
                          vq->log_addr + offsetof(struct vring_used, idx),
index 678b270631983a47718d5d8d78f395e9d639ec83..f9ef0673a083cb63c776ca58631710e659329ee3 100644 (file)
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                goto err_alloc;
        }
 
-       if (!data->levels) {
+       if (data->levels) {
+               /*
+                * For the DT case, only when brightness levels is defined
+                * data->levels is filled. For the non-DT case, data->levels
+                * can come from platform data, however is not usual.
+                */
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
+
+                       pb->levels = data->levels;
+               }
+       } else if (!data->max_brightness) {
+               /*
+                * If no brightness levels are provided and max_brightness is
+                * not set, use the default brightness table. For the DT case,
+                * max_brightness is set to 0 when brightness levels is not
+                * specified. For the non-DT case, max_brightness is usually
+                * set to some value.
+                */
+
+               /* Get the PWM period (in nanoseconds) */
+               pwm_get_state(pb->pwm, &state);
+
                ret = pwm_backlight_brightness_default(&pdev->dev, data,
                                                       state.period);
                if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                                "failed to setup default brightness table\n");
                        goto err_alloc;
                }
-       }
 
-       for (i = 0; i <= data->max_brightness; i++) {
-               if (data->levels[i] > pb->scale)
-                       pb->scale = data->levels[i];
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
 
-               pb->levels = data->levels;
+                       pb->levels = data->levels;
+               }
+       } else {
+               /*
+                * That only happens for the non-DT case, where platform data
+                * sets the max_brightness value.
+                */
+               pb->scale = data->max_brightness;
        }
 
        pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
index 97f9835929256bfd6027d6958e14fb3ec64d34da..aac9659381d2564542eecaa71932aeea7f6720e2 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -45,6 +45,7 @@
 
 #include <asm/kmap_types.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -1038,6 +1039,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
        if (!table || id >= table->nr)
                goto out;
 
+       id = array_index_nospec(id, table->nr);
        ctx = rcu_dereference(table->table[id]);
        if (ctx && ctx->user_id == ctx_id) {
                if (percpu_ref_tryget_live(&ctx->users))
index b5ecd6f50360d53e0fe24c4ea79416722e93fc6c..4e9a7cc488da16efea5f2a69b95f64bd23e1a245 100644 (file)
@@ -563,8 +563,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",noacl");
 #endif
 
-       if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
-               seq_puts(m, ",nocopyfrom");
+       if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
+               seq_puts(m, ",copyfrom");
 
        if (fsopt->mds_namespace)
                seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
index c005a5400f2ed9b1e0e932ea680c2f0824020277..79a265ba92006b309796c525a5a2db3e9522f076 100644 (file)
@@ -42,7 +42,9 @@
 #define CEPH_MOUNT_OPT_NOQUOTADF       (1<<13) /* no root dir quota in statfs */
 #define CEPH_MOUNT_OPT_NOCOPYFROM      (1<<14) /* don't use RADOS 'copy-from' op */
 
-#define CEPH_MOUNT_OPT_DEFAULT    CEPH_MOUNT_OPT_DCACHE
+#define CEPH_MOUNT_OPT_DEFAULT                 \
+       (CEPH_MOUNT_OPT_DCACHE |                \
+        CEPH_MOUNT_OPT_NOCOPYFROM)
 
 #define ceph_set_mount_opt(fsc, opt) \
        (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
index 47395b0c3b35e5fde0110eb142414010f06b126a..e909678afa2d06770946b39903e86ef0ee8cb6b4 100644 (file)
@@ -1119,8 +1119,10 @@ static int fuse_permission(struct inode *inode, int mask)
        if (fc->default_permissions ||
            ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                struct fuse_inode *fi = get_fuse_inode(inode);
+               u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
 
-               if (time_before64(fi->i_time, get_jiffies_64())) {
+               if (perm_mask & READ_ONCE(fi->inval_mask) ||
+                   time_before64(fi->i_time, get_jiffies_64())) {
                        refreshed = true;
 
                        err = fuse_perm_getattr(inode, mask);
@@ -1241,7 +1243,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 
 static int fuse_dir_release(struct inode *inode, struct file *file)
 {
-       fuse_release_common(file, FUSE_RELEASEDIR);
+       fuse_release_common(file, true);
 
        return 0;
 }
@@ -1249,7 +1251,25 @@ static int fuse_dir_release(struct inode *inode, struct file *file)
 static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
 {
-       return fuse_fsync_common(file, start, end, datasync, 1);
+       struct inode *inode = file->f_mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       int err;
+
+       if (is_bad_inode(inode))
+               return -EIO;
+
+       if (fc->no_fsyncdir)
+               return 0;
+
+       inode_lock(inode);
+       err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
+       if (err == -ENOSYS) {
+               fc->no_fsyncdir = 1;
+               err = 0;
+       }
+       inode_unlock(inode);
+
+       return err;
 }
 
 static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
index b52f9baaa3e7b9c98478a8c115748ae71fb7b0e1..ffaffe18352a1bb53708f3ac2c0f2638a24337ab 100644 (file)
@@ -89,12 +89,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
        iput(req->misc.release.inode);
 }
 
-static void fuse_file_put(struct fuse_file *ff, bool sync)
+static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
 {
        if (refcount_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
 
-               if (ff->fc->no_open) {
+               if (ff->fc->no_open && !isdir) {
                        /*
                         * Drop the release request when client does not
                         * implement 'open'
@@ -247,10 +247,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
        req->in.args[0].value = inarg;
 }
 
-void fuse_release_common(struct file *file, int opcode)
+void fuse_release_common(struct file *file, bool isdir)
 {
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req = ff->reserved_req;
+       int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
 
        fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -272,7 +273,7 @@ void fuse_release_common(struct file *file, int opcode)
         * synchronous RELEASE is allowed (and desirable) in this case
         * because the server can be trusted not to screw up.
         */
-       fuse_file_put(ff, ff->fc->destroy_req != NULL);
+       fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -288,7 +289,7 @@ static int fuse_release(struct inode *inode, struct file *file)
        if (fc->writeback_cache)
                write_inode_now(inode, 1);
 
-       fuse_release_common(file, FUSE_RELEASE);
+       fuse_release_common(file, false);
 
        /* return value is ignored by VFS */
        return 0;
@@ -302,7 +303,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
         * iput(NULL) is a no-op and since the refcount is 1 and everything's
         * synchronous, we are fine with not doing igrab() here"
         */
-       fuse_file_put(ff, true);
+       fuse_file_put(ff, true, false);
 }
 EXPORT_SYMBOL_GPL(fuse_sync_release);
 
@@ -441,13 +442,30 @@ static int fuse_flush(struct file *file, fl_owner_t id)
 }
 
 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
-                     int datasync, int isdir)
+                     int datasync, int opcode)
 {
        struct inode *inode = file->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        FUSE_ARGS(args);
        struct fuse_fsync_in inarg;
+
+       memset(&inarg, 0, sizeof(inarg));
+       inarg.fh = ff->fh;
+       inarg.fsync_flags = datasync ? 1 : 0;
+       args.in.h.opcode = opcode;
+       args.in.h.nodeid = get_node_id(inode);
+       args.in.numargs = 1;
+       args.in.args[0].size = sizeof(inarg);
+       args.in.args[0].value = &inarg;
+       return fuse_simple_request(fc, &args);
+}
+
+static int fuse_fsync(struct file *file, loff_t start, loff_t end,
+                     int datasync)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
 
        if (is_bad_inode(inode))
@@ -479,34 +497,18 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
        if (err)
                goto out;
 
-       if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
+       if (fc->no_fsync)
                goto out;
 
-       memset(&inarg, 0, sizeof(inarg));
-       inarg.fh = ff->fh;
-       inarg.fsync_flags = datasync ? 1 : 0;
-       args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
-       args.in.h.nodeid = get_node_id(inode);
-       args.in.numargs = 1;
-       args.in.args[0].size = sizeof(inarg);
-       args.in.args[0].value = &inarg;
-       err = fuse_simple_request(fc, &args);
+       err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
        if (err == -ENOSYS) {
-               if (isdir)
-                       fc->no_fsyncdir = 1;
-               else
-                       fc->no_fsync = 1;
+               fc->no_fsync = 1;
                err = 0;
        }
 out:
        inode_unlock(inode);
-       return err;
-}
 
-static int fuse_fsync(struct file *file, loff_t start, loff_t end,
-                     int datasync)
-{
-       return fuse_fsync_common(file, start, end, datasync, 0);
+       return err;
 }
 
 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
@@ -807,7 +809,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
                put_page(page);
        }
        if (req->ff)
-               fuse_file_put(req->ff, false);
+               fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1460,7 +1462,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
                __free_page(req->pages[i]);
 
        if (req->ff)
-               fuse_file_put(req->ff, false);
+               fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1619,7 +1621,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
        ff = __fuse_write_file_get(fc, fi);
        err = fuse_flush_times(inode, ff);
        if (ff)
-               fuse_file_put(ff, 0);
+               fuse_file_put(ff, false, false);
 
        return err;
 }
@@ -1940,7 +1942,7 @@ static int fuse_writepages(struct address_space *mapping,
                err = 0;
        }
        if (data.ff)
-               fuse_file_put(data.ff, false);
+               fuse_file_put(data.ff, false, false);
 
        kfree(data.orig_pages);
 out:
index e9f712e81c7d9e188ae174d2bb3f0ba3b853d90f..2f2c92e6f8cbe1686660af41527aaeaabf908693 100644 (file)
@@ -822,13 +822,13 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
 /**
  * Send RELEASE or RELEASEDIR request
  */
-void fuse_release_common(struct file *file, int opcode);
+void fuse_release_common(struct file *file, bool isdir);
 
 /**
  * Send FSYNC or FSYNCDIR request
  */
 int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
-                     int datasync, int isdir);
+                     int datasync, int opcode);
 
 /**
  * Notify poll wakeup
index 0b94b23b02d4798d557f09020b5a4c7fda91c15f..568abed20eb20a097f43c108db7f274947ad604a 100644 (file)
@@ -115,7 +115,7 @@ static void fuse_i_callback(struct rcu_head *head)
 static void fuse_destroy_inode(struct inode *inode)
 {
        struct fuse_inode *fi = get_fuse_inode(inode);
-       if (S_ISREG(inode->i_mode)) {
+       if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
                WARN_ON(!list_empty(&fi->write_files));
                WARN_ON(!list_empty(&fi->queued_writes));
        }
@@ -1068,6 +1068,7 @@ void fuse_dev_free(struct fuse_dev *fud)
 
                fuse_conn_put(fc);
        }
+       kfree(fud->pq.processing);
        kfree(fud);
 }
 EXPORT_SYMBOL_GPL(fuse_dev_free);
index c6289147c7871f165b70f5ca8e13668f3010d4b0..82c129bfe58d9e3a8be301fecc9bbbb4e1de88d5 100644 (file)
@@ -651,6 +651,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
        return ovl_create_object(dentry, S_IFLNK, 0, link);
 }
 
+static int ovl_set_link_redirect(struct dentry *dentry)
+{
+       const struct cred *old_cred;
+       int err;
+
+       old_cred = ovl_override_creds(dentry->d_sb);
+       err = ovl_set_redirect(dentry, false);
+       revert_creds(old_cred);
+
+       return err;
+}
+
 static int ovl_link(struct dentry *old, struct inode *newdir,
                    struct dentry *new)
 {
@@ -670,7 +682,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
                goto out_drop_write;
 
        if (ovl_is_metacopy_dentry(old)) {
-               err = ovl_set_redirect(old, false);
+               err = ovl_set_link_redirect(old);
                if (err)
                        goto out_drop_write;
        }
index 8fa37cd7818adfe5272027ad31c24e764837a1c0..54e5d17d7f3e5c91488b6ec4cab54c8177850fd3 100644 (file)
@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
                goto out;
        }
 
-       /* Otherwise, get a connected non-upper dir or disconnected non-dir */
-       if (d_is_dir(origin.dentry) &&
-           (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+       /* Find origin.dentry again with ovl_acceptable() layer check */
+       if (d_is_dir(origin.dentry)) {
                dput(origin.dentry);
                origin.dentry = NULL;
                err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
                        goto out_err;
        }
 
+       /* Get a connected non-upper dir or disconnected non-dir */
        dentry = ovl_get_dentry(sb, NULL, &origin, index);
 
 out:
index 6bcc9dedc342cc7cf141abbc5220f4a0aa5ce1da..3b7ed5d2279c6a8efde8180471bde94ef1020964 100644 (file)
@@ -286,22 +286,13 @@ int ovl_permission(struct inode *inode, int mask)
        if (err)
                return err;
 
-       /* No need to do any access on underlying for special files */
-       if (special_file(realinode->i_mode))
-               return 0;
-
-       /* No need to access underlying for execute */
-       mask &= ~MAY_EXEC;
-       if ((mask & (MAY_READ | MAY_WRITE)) == 0)
-               return 0;
-
-       /* Lower files get copied up, so turn write access into read */
-       if (!upperinode && mask & MAY_WRITE) {
+       old_cred = ovl_override_creds(inode->i_sb);
+       if (!upperinode &&
+           !special_file(realinode->i_mode) && mask & MAY_WRITE) {
                mask &= ~(MAY_WRITE | MAY_APPEND);
+               /* Make sure mounter can read file for copy up later */
                mask |= MAY_READ;
        }
-
-       old_cred = ovl_override_creds(inode->i_sb);
        err = inode_permission(realinode, mask);
        revert_creds(old_cred);
 
index cd58939dc977e481930053f37e3137394265d75e..7a85e609fc276f28dd80c8dc481e383094d1dfe9 100644 (file)
@@ -1566,7 +1566,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                cond_resched();
 
                BUG_ON(!vma_can_userfault(vma));
-               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
                /*
                 * Nothing to do: this vma is already registered into this
@@ -1575,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                if (!vma->vm_userfaultfd_ctx.ctx)
                        goto skip;
 
+               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
                if (vma->vm_start > start)
                        start = vma->vm_start;
                vma_end = min(end, vma->vm_end);
index 827e4d3bbc7a46ef59222651a8020234addc82cb..8cc7b09c1bc7173e0fc0128294a3dac43e155d3e 100644 (file)
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_FIXMAP_H
 
 #include <linux/bug.h>
+#include <linux/mm_types.h>
 
 #define __fix_to_virt(x)       (FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)       ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
index 795ff0b869bbf6403c0e89b87fdc7855e3bbb5cd..a8b9d90a804223e7a82be545b90b285b6cacb9d8 100644 (file)
@@ -861,7 +861,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
 extern int bpf_jit_kallsyms;
-extern int bpf_jit_limit;
+extern long bpf_jit_limit;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
index 34e17e6f894290f161d5e734e6fb2f9dfb0c148e..4e77bfe0b5803330e14c6fe39f038089713d2fcb 100644 (file)
@@ -582,11 +582,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
 };
 
 struct mlx5_ifc_flow_table_eswitch_cap_bits {
-       u8      reserved_at_0[0x1c];
-       u8      fdb_multi_path_to_table[0x1];
-       u8      reserved_at_1d[0x1];
+       u8      reserved_at_0[0x1a];
        u8      multi_fdb_encap[0x1];
-       u8      reserved_at_1e[0x1e1];
+       u8      reserved_at_1b[0x1];
+       u8      fdb_multi_path_to_table[0x1];
+       u8      reserved_at_1d[0x3];
+
+       u8      reserved_at_20[0x1e0];
 
        struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
 
index 5ed8f6292a533c2efbc4390d1a6a165bf4ad2252..2c471a2c43fa7137f1780b976615a6cbef5f6b6e 100644 (file)
@@ -206,6 +206,11 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT  (order_base_2(sizeof(struct page)))
+
 #define PAGE_FRAG_CACHE_MAX_SIZE       __ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER      get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
index 847705a6d0ec2dba2a82ca87bba54c797bb0b956..db023a92f3a4a814d30927af066411ac5532270f 100644 (file)
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
 #endif
 
+#if defined(CONFIG_SPARSEMEM)
+void memblocks_present(void);
+#else
+static inline void memblocks_present(void) {}
+#endif
+
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
index 01797cb4587ede275db60176820571a3dea92c18..a0dcc9b6a723754896aaf638114e07b7fc0bdfcf 100644 (file)
@@ -565,7 +565,7 @@ struct platform_device_id {
 /**
  * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
  * @phy_id: The result of
- *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ *     (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
  *     for this PHY type
  * @phy_id_mask: Defines the significant bits of @phy_id.  A value of 0
  *     is used to terminate an array of struct mdio_device_id.
index 4a520d3304a2fad7a47ddd366672c2fbb054ec1e..cf09ab37b45b7d5990c388799b7f502d500cd182 100644 (file)
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss)                                        \
-       rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
 
index b9626aa7e90c67e1ecfa38d07d9a4754213313d0..3e2a80cc7b56dd3da0a13918de609cd11a1801c9 100644 (file)
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
 
 static inline u32 t10_pi_ref_tag(struct request *rq)
 {
+       unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-       return blk_rq_pos(rq) >>
-               (rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
-       return -1U;
+       if (rq->q->integrity.interval_exp)
+               shift = rq->q->integrity.interval_exp;
 #endif
+       return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
 }
 
 extern const struct blk_integrity_profile t10_pi_type1_crc;
index 564892e19f8caac321780119747b0f18435f3af3..f492e21c4aa2c81f5301116408f84d0703a61a18 100644 (file)
@@ -553,6 +553,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
        return curr;
 }
 
+/**
+ * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.  May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
+                       void *old, void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_bh(xa);
+       curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+       xa_unlock_bh(xa);
+
+       return curr;
+}
+
+/**
+ * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.  May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
+                       void *old, void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_irq(xa);
+       curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+       xa_unlock_irq(xa);
+
+       return curr;
+}
+
 /**
  * xa_insert() - Store this entry in the XArray unless another entry is
  *                     already present.
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644 (file)
index 0000000..d21f40e
--- /dev/null
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the MPEG2 state controls for use with stateless MPEG-2
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _MPEG2_CTRLS_H_
+#define _MPEG2_CTRLS_H_
+
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS         (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION         (V4L2_CID_MPEG_BASE+251)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
+#define        V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
+
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I       1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P       2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B       3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D       4
+
+struct v4l2_mpeg2_sequence {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+       __u16   horizontal_size;
+       __u16   vertical_size;
+       __u32   vbv_buffer_size;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+       __u8    profile_and_level_indication;
+       __u8    progressive_sequence;
+       __u8    chroma_format;
+       __u8    pad;
+};
+
+struct v4l2_mpeg2_picture {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+       __u8    picture_coding_type;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+       __u8    f_code[2][2];
+       __u8    intra_dc_precision;
+       __u8    picture_structure;
+       __u8    top_field_first;
+       __u8    frame_pred_frame_dct;
+       __u8    concealment_motion_vectors;
+       __u8    q_scale_type;
+       __u8    intra_vlc_format;
+       __u8    alternate_scan;
+       __u8    repeat_first_field;
+       __u8    progressive_frame;
+       __u8    pad;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+       __u32   bit_size;
+       __u32   data_bit_offset;
+
+       struct v4l2_mpeg2_sequence sequence;
+       struct v4l2_mpeg2_picture picture;
+
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+       __u8    quantiser_scale_code;
+
+       __u8    backward_ref_index;
+       __u8    forward_ref_index;
+       __u8    pad;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+       __u8    load_intra_quantiser_matrix;
+       __u8    load_non_intra_quantiser_matrix;
+       __u8    load_chroma_intra_quantiser_matrix;
+       __u8    load_chroma_non_intra_quantiser_matrix;
+
+       __u8    intra_quantiser_matrix[64];
+       __u8    non_intra_quantiser_matrix[64];
+       __u8    chroma_intra_quantiser_matrix[64];
+       __u8    chroma_non_intra_quantiser_matrix[64];
+};
+
+#endif
index 83ce0593b275c7462ff787a98156d59d6f6833e0..d63cf227b0ab9a77f2c4c81cf4d6c359473286f8 100644 (file)
 #include <linux/videodev2.h>
 #include <media/media-request.h>
 
+/*
+ * Include the mpeg2 stateless codec compound control definitions.
+ * This will move to the public headers once this API is fully stable.
+ */
+#include <media/mpeg2-ctrls.h>
+
 /* forward references */
 struct file;
 struct v4l2_ctrl_handler;
index e86981d615ae4930968cb22b0c85c28f38dfb74f..4a737b2c610bf0a5f2f246c36b3bae4775543043 100644 (file)
@@ -239,6 +239,7 @@ struct vb2_queue;
  * @num_planes:                number of planes in the buffer
  *                     on an internal driver queue.
  * @timestamp:         frame timestamp in ns.
+ * @request:           the request this buffer is associated with.
  * @req_obj:           used to bind this buffer to a request. This
  *                     request object has a refcount.
  */
@@ -249,6 +250,7 @@ struct vb2_buffer {
        unsigned int            memory;
        unsigned int            num_planes;
        u64                     timestamp;
+       struct media_request    *request;
        struct media_request_object     req_obj;
 
        /* private: internal use only
index b0d022ff6ea1702037b84a038f3c81ce56540aa4..5ce926701bd02f102b5b1dca28e17d50616a5fd1 100644 (file)
@@ -144,25 +144,6 @@ struct ip_tunnel {
        bool                    ignore_df;
 };
 
-#define TUNNEL_CSUM            __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING         __cpu_to_be16(0x02)
-#define TUNNEL_KEY             __cpu_to_be16(0x04)
-#define TUNNEL_SEQ             __cpu_to_be16(0x08)
-#define TUNNEL_STRICT          __cpu_to_be16(0x10)
-#define TUNNEL_REC             __cpu_to_be16(0x20)
-#define TUNNEL_VERSION         __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY          __cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
-#define TUNNEL_OAM             __cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT                __cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT      __cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE         __cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT      __cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
-               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-
 struct tnl_ptk_info {
        __be16 flags;
        __be16 proto;
index f665d74ae509b41d3ffe10faad4065b61f369341..0e3a09380655e00584d6199f357f2427d2f7e704 100644 (file)
@@ -2340,22 +2340,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
 
 /**
- * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
  * @sk:                socket sending this packet
  * @tsflags:   timestamping flags to use
  * @tx_flags:  completed with instructions for time stamping
+ * @tskey:      filled in with next sk_tskey (not for TCP, which uses seqno)
  *
  * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
  */
-static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
-                                    __u8 *tx_flags)
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+                                     __u8 *tx_flags, __u32 *tskey)
 {
-       if (unlikely(tsflags))
+       if (unlikely(tsflags)) {
                __sock_tx_timestamp(tsflags, tx_flags);
+               if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+                   tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+                       *tskey = sk->sk_tskey++;
+       }
        if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
                *tx_flags |= SKBTX_WIFI_STATUS;
 }
 
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+                                    __u8 *tx_flags)
+{
+       _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+       _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+                          &skb_shinfo(skb)->tskey);
+}
+
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
  * @sk: socket to eat this skb from
index bab5627ff5e393502fc5ed4eb7bd33cae5d02d14..3cbcd12303fd68fc604d61d4334cde643e10395a 100644 (file)
  *
  * void (*unhash)(struct tls_device *device, struct sock *sk);
  *     This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ *     Release the registered device and allocated resources
+ * @kref: Number of reference to tls_device
  */
 struct tls_device {
        char name[TLS_DEVICE_NAME_MAX];
@@ -83,6 +87,8 @@ struct tls_device {
        int  (*feature)(struct tls_device *device);
        int  (*hash)(struct tls_device *device, struct sock *sk);
        void (*unhash)(struct tls_device *device, struct sock *sk);
+       void (*release)(struct kref *kref);
+       struct kref kref;
 };
 
 enum {
index 0eb390c205af023eca277c8ca5db85faaea8fa4a..da588def3c61d45767ebc7a68994fe3958469b50 100644 (file)
@@ -1552,6 +1552,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                    int (*func)(struct xfrm_state *, int, void*), void *);
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
 struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
                                   const xfrm_address_t *saddr,
                                   const struct flowi *fl,
index 21381449d98a88f0b5f68942b307128a4269926a..355c4ac2c0b0d04ae334f99ae2d9dea86a9cad9c 100644 (file)
@@ -3,6 +3,7 @@
 #
 mandatory-y += auxvec.h
 mandatory-y += bitsperlong.h
+mandatory-y += bpf_perf_event.h
 mandatory-y += byteorder.h
 mandatory-y += errno.h
 mandatory-y += fcntl.h
index 8f08ff9bdea091a001094119f753046c79f7dd9f..6fa38d001d84ff5af90fe6014e7874d18f8e2bc1 100644 (file)
@@ -141,7 +141,7 @@ struct blk_zone_range {
  */
 #define BLKREPORTZONE  _IOWR(0x12, 130, struct blk_zone_report)
 #define BLKRESETZONE   _IOW(0x12, 131, struct blk_zone_range)
-#define BLKGETZONESZ   _IOW(0x12, 132, __u32)
-#define BLKGETNRZONES  _IOW(0x12, 133, __u32)
+#define BLKGETZONESZ   _IOR(0x12, 132, __u32)
+#define BLKGETNRZONES  _IOR(0x12, 133, __u32)
 
 #endif /* _UAPI_BLKZONED_H */
index 1b3d148c4560813089b13215e1ab9b961c8c54e0..7d9105533c7b93b0e92d7679292a8c6447a8d08c 100644 (file)
@@ -160,4 +160,24 @@ enum {
 };
 
 #define IFLA_VTI_MAX   (__IFLA_VTI_MAX - 1)
+
+#define TUNNEL_CSUM            __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING         __cpu_to_be16(0x02)
+#define TUNNEL_KEY             __cpu_to_be16(0x04)
+#define TUNNEL_SEQ             __cpu_to_be16(0x08)
+#define TUNNEL_STRICT          __cpu_to_be16(0x10)
+#define TUNNEL_REC             __cpu_to_be16(0x20)
+#define TUNNEL_VERSION         __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY          __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT    __cpu_to_be16(0x0100)
+#define TUNNEL_OAM             __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT                __cpu_to_be16(0x0400)
+#define TUNNEL_GENEVE_OPT      __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT       __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE         __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT      __cpu_to_be16(0x4000)
+
+#define TUNNEL_OPTIONS_PRESENT \
+               (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+
 #endif /* _UAPI_IF_TUNNEL_H_ */
index 48e8a225b985ae646b4bba2cdcdf62ca9c42a7a6..f6052e70bf403950eb658cd350337162a100df5f 100644 (file)
@@ -266,10 +266,14 @@ struct sockaddr_in {
 
 #define        IN_CLASSD(a)            ((((long int) (a)) & 0xf0000000) == 0xe0000000)
 #define        IN_MULTICAST(a)         IN_CLASSD(a)
-#define IN_MULTICAST_NET       0xF0000000
+#define        IN_MULTICAST_NET        0xe0000000
 
-#define        IN_EXPERIMENTAL(a)      ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define        IN_BADCLASS(a)          IN_EXPERIMENTAL((a))
+#define        IN_BADCLASS(a)          ((((long int) (a) ) == 0xffffffff)
+#define        IN_EXPERIMENTAL(a)      IN_BADCLASS((a))
+
+#define        IN_CLASSE(a)            ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define        IN_CLASSE_NET           0xffffffff
+#define        IN_CLASSE_NSHIFT        0
 
 /* Address to accept any incoming messages. */
 #define        INADDR_ANY              ((unsigned long int) 0x00000000)
index 3eb5a4c3d60a94fbc3cbc35deb71f56eb7efe85a..ae366b87426accef1c49b4fdeee478717e0d60ea 100644 (file)
 
 #define ABS_MISC               0x28
 
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED           0x2e
+
 #define ABS_MT_SLOT            0x2f    /* MT slot being modified */
 #define ABS_MT_TOUCH_MAJOR     0x30    /* Major axis of touching ellipse */
 #define ABS_MT_TOUCH_MINOR     0x31    /* Minor axis (omit if circular) */
index 97ff3c17ec4d2021a728c71141f1baa39f707eee..e5b39721c6e4877c1235b2708956474c80f4761f 100644 (file)
@@ -155,8 +155,8 @@ enum txtime_flags {
 };
 
 struct sock_txtime {
-       clockid_t       clockid;        /* reference clockid */
-       __u32           flags;          /* as defined by enum txtime_flags */
+       __kernel_clockid_t      clockid;/* reference clockid */
+       __u32                   flags;  /* as defined by enum txtime_flags */
 };
 
 #endif /* _NET_TIMESTAMPING_H */
index 486ed1f0c0bc17f48dca895ebf9581aa7d69278d..0a4d73317759c9ee523d7d5482548b3171c821e6 100644 (file)
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
-#define NETLINK_DUMP_STRICT_CHK                12
+#define NETLINK_GET_STRICT_CHK         12
 
 struct nl_pktinfo {
        __u32   group;
index 998983a6e6b712f76b503fc8b2950554d582659f..3dcfc6148f99bd5734652369a60c088a30c51b01 100644 (file)
@@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode {
 #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE          (V4L2_CID_MPEG_BASE+228)
 #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME            (V4L2_CID_MPEG_BASE+229)
 
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS         (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION         (V4L2_CID_MPEG_BASE+251)
-
 #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP            (V4L2_CID_MPEG_BASE+300)
 #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP            (V4L2_CID_MPEG_BASE+301)
 #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP            (V4L2_CID_MPEG_BASE+302)
@@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode {
 #define V4L2_CID_DETECT_MD_THRESHOLD_GRID      (V4L2_CID_DETECT_CLASS_BASE + 3)
 #define V4L2_CID_DETECT_MD_REGION_GRID         (V4L2_CID_DETECT_CLASS_BASE + 4)
 
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I       1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P       2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B       3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D       4
-
-struct v4l2_mpeg2_sequence {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
-       __u16   horizontal_size;
-       __u16   vertical_size;
-       __u32   vbv_buffer_size;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
-       __u8    profile_and_level_indication;
-       __u8    progressive_sequence;
-       __u8    chroma_format;
-       __u8    pad;
-};
-
-struct v4l2_mpeg2_picture {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
-       __u8    picture_coding_type;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
-       __u8    f_code[2][2];
-       __u8    intra_dc_precision;
-       __u8    picture_structure;
-       __u8    top_field_first;
-       __u8    frame_pred_frame_dct;
-       __u8    concealment_motion_vectors;
-       __u8    q_scale_type;
-       __u8    intra_vlc_format;
-       __u8    alternate_scan;
-       __u8    repeat_first_field;
-       __u8    progressive_frame;
-       __u8    pad;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
-       __u32   bit_size;
-       __u32   data_bit_offset;
-
-       struct v4l2_mpeg2_sequence sequence;
-       struct v4l2_mpeg2_picture picture;
-
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
-       __u8    quantiser_scale_code;
-
-       __u8    backward_ref_index;
-       __u8    forward_ref_index;
-       __u8    pad;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
-       /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
-       __u8    load_intra_quantiser_matrix;
-       __u8    load_non_intra_quantiser_matrix;
-       __u8    load_chroma_intra_quantiser_matrix;
-       __u8    load_chroma_non_intra_quantiser_matrix;
-
-       __u8    intra_quantiser_matrix[64];
-       __u8    non_intra_quantiser_matrix[64];
-       __u8    chroma_intra_quantiser_matrix[64];
-       __u8    chroma_non_intra_quantiser_matrix[64];
-};
-
 #endif
index c8e8ff810190c22ad1bec8a6af068109106415b7..2ba2ad0e23fbb67d54f886ed6f292308b2815bb6 100644 (file)
@@ -1622,8 +1622,6 @@ struct v4l2_ext_control {
                __u8 __user *p_u8;
                __u16 __user *p_u16;
                __u32 __user *p_u32;
-               struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
-               struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
                void __user *ptr;
        };
 } __attribute__ ((packed));
@@ -1669,8 +1667,6 @@ enum v4l2_ctrl_type {
        V4L2_CTRL_TYPE_U8            = 0x0100,
        V4L2_CTRL_TYPE_U16           = 0x0101,
        V4L2_CTRL_TYPE_U32           = 0x0102,
-       V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
-       V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
 };
 
 /*  Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
index cf5b5a0dcbc2f04a5a9812f6272a1b54d7fd06b0..ed9352513c324853507995a60b9fc220286690bd 100644 (file)
@@ -515,8 +515,8 @@ config PSI_DEFAULT_DISABLED
        depends on PSI
        help
          If set, pressure stall information tracking will be disabled
-         per default but can be enabled through passing psi_enable=1
-         on the kernel commandline during boot.
+         per default but can be enabled through passing psi=1 on the
+         kernel commandline during boot.
 
 endmenu # "CPU/Task time and stats accounting"
 
index b1a3545d0ec89f747d1cd51b7140fd64ad2fe6fd..b2890c268cb340cab7424ef750ec47bcc5488ad4 100644 (file)
@@ -365,13 +365,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
 }
 
 #ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
-
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
 int bpf_jit_harden   __read_mostly;
 int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit    __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit   __read_mostly;
 
 static __always_inline void
 bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -580,16 +578,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 static atomic_long_t bpf_jit_current;
 
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
 #if defined(MODULES_VADDR)
+       return MODULES_END - MODULES_VADDR;
+#else
+       return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
 static int __init bpf_jit_charge_init(void)
 {
        /* Only used as heuristic here to derive limit. */
-       bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
-                                           PAGE_SIZE), INT_MAX);
+       bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+                                           PAGE_SIZE), LONG_MAX);
        return 0;
 }
 pure_initcall(bpf_jit_charge_init);
-#endif
 
 static int bpf_jit_charge_modmem(u32 pages)
 {
index fc760d00a38c497502c28b56aad1a8d426565560..51ba84d4d34a06b6557245a05211a919d23259fb 100644 (file)
@@ -5102,9 +5102,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        }
        new_sl->next = env->explored_states[insn_idx];
        env->explored_states[insn_idx] = new_sl;
-       /* connect new state to parentage chain */
-       for (i = 0; i < BPF_REG_FP; i++)
-               cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+       /* connect new state to parentage chain. Current frame needs all
+        * registers connected. Only r6 - r9 of the callers are alive (pushed
+        * to the stack implicitly by JITs) so in callers' frames connect just
+        * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+        * the state of the call instruction (with WRITTEN set), and r0 comes
+        * from callee with its full parentage chain, anyway.
+        */
+       for (j = 0; j <= cur->curframe; j++)
+               for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+                       cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
        /* clear write marks in current state: the writes we did are not writes
         * our child did, so they don't screen off its reads from us.
         * (There are no read marks in current state, because reads always mark
index 22a12ab5a5e9aaac7ac5207d70b059bd75e48965..375c77e8d52fa0aa17d4dee12651836fc7981556 100644 (file)
@@ -309,7 +309,12 @@ int dma_direct_supported(struct device *dev, u64 mask)
 
        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
 
-       return mask >= phys_to_dma(dev, min_mask);
+       /*
+        * This check needs to be against the actual bit mask value, so
+        * use __phys_to_dma() here so that the SME encryption mask isn't
+        * part of the check.
+        */
+       return mask >= __phys_to_dma(dev, min_mask);
 }
 
 int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
index 77734451cb05d3876a123a6c620c33287cc8cc33..e23eb9fc77aa91d542b96de42179b042b9fa1368 100644 (file)
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
        if (ops->flags & FTRACE_OPS_FL_ENABLED)
                ftrace_shutdown(ops, 0);
        ops->flags |= FTRACE_OPS_FL_DELETED;
+       ftrace_free_filter(ops);
        mutex_unlock(&ftrace_lock);
 }
 
index 84a65173b1e91dc347e5d98b1a831da42c1bd7bb..5574e862de8d52de579cdaac347891e82dd2548b 100644 (file)
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                }
        }
 
+       kfree(op_stack);
+       kfree(inverts);
        return prog;
 out_free:
        kfree(op_stack);
-       kfree(prog_stack);
        kfree(inverts);
+       kfree(prog_stack);
        return ERR_PTR(ret);
 }
 
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
        err = process_preds(call, filter_string, *filterp, pe);
        if (err && set_str)
                append_filter_err(pe, *filterp);
+       create_filter_finish(pe);
 
        return err;
 }
index 2152d1e530cb5e8b8b1020840b1613b3099caebe..cd12ecb66eb9236e3ca741517ea55105d744ab6e 100644 (file)
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
 
        /* The filter is for the 'trigger' event, not the triggered event */
        ret = create_event_filter(file->event_call, filter_str, false, &filter);
-       if (ret)
-               goto out;
+       /*
+        * If create_event_filter() fails, filter still needs to be freed.
+        * Which the calling code will do with data->filter.
+        */
  assign:
        tmp = rcu_access_pointer(data->filter);
 
index 1106bb6aa01e977de26bcb1235080b00b0e4a067..14d51548bea6414f9b256dd0179830d07dce1581 100644 (file)
@@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
        while (radix_tree_is_internal_node(node)) {
                unsigned offset;
 
-               if (node == RADIX_TREE_RETRY)
-                       goto restart;
                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);
                slot = parent->slots + offset;
+               if (node == RADIX_TREE_RETRY)
+                       goto restart;
                if (parent->shift == 0)
                        break;
        }
index 0598e86af8fc327266988a273f5826730d951e0d..4676c0a1eeca0f7f7c559176b2c580cedcccce6d 100644 (file)
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
 } while (0)
 #endif
 
+static void *xa_mk_index(unsigned long index)
+{
+       return xa_mk_value(index & LONG_MAX);
+}
+
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-       return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+       return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
        u32 id = 0;
 
-       XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+       XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
                                gfp) != 0);
        XA_BUG_ON(xa, id != index);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-       XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+       XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
        XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
 
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
 
        xas_set(&xas, 0);
        xas_for_each(&xas, entry, ULONG_MAX) {
-               xas_store(&xas, xa_mk_value(xas.xa_index));
+               xas_store(&xas, xa_mk_index(xas.xa_index));
        }
        xas_unlock(&xas);
 
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
                XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
                xa_set_mark(xa, index + 2, XA_MARK_1);
                XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
-               xa_store_order(xa, index, order, xa_mk_value(index),
+               xa_store_order(xa, index, order, xa_mk_index(index),
                                GFP_KERNEL);
                for (i = base; i < next; i++) {
                        XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
                        xas_set(&xas, j);
                        do {
                                xas_lock(&xas);
-                               xas_store(&xas, xa_mk_value(j));
+                               xas_store(&xas, xa_mk_index(j));
                                xas_unlock(&xas);
                        } while (xas_nomem(&xas, GFP_KERNEL));
                }
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
                xas_set(&xas, 0);
                j = i;
                xas_for_each(&xas, entry, ULONG_MAX) {
-                       XA_BUG_ON(xa, entry != xa_mk_value(j));
+                       XA_BUG_ON(xa, entry != xa_mk_index(j));
                        xas_store(&xas, NULL);
                        j++;
                }
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
        unsigned long min = index & ~((1UL << order) - 1);
        unsigned long max = min + (1UL << order);
 
-       xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
-       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
-       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
        XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
        xas_lock(&xas);
-       XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+       XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
        xas_unlock(&xas);
-       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
-       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+       XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+       XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
        XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
        xas_unlock(&xas);
        XA_BUG_ON(xa, !xa_empty(xa));
 }
+
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
+               unsigned int order)
+{
+       XA_STATE(xas, xa, 0);
+       void *entry;
+       int n = 0;
+
+       xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+       xas_lock(&xas);
+       xas_for_each(&xas, entry, ULONG_MAX) {
+               XA_BUG_ON(xa, entry != xa_mk_index(index));
+               n++;
+       }
+       XA_BUG_ON(xa, n != 1);
+       xas_set(&xas, index + 1);
+       xas_for_each(&xas, entry, ULONG_MAX) {
+               XA_BUG_ON(xa, entry != xa_mk_index(index));
+               n++;
+       }
+       XA_BUG_ON(xa, n != 2);
+       xas_unlock(&xas);
+
+       xa_destroy(xa);
+}
 #endif
 
 static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
 
        for (i = 0; i < max_order; i++) {
                for (j = 0; j < max_order; j++) {
-                       xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
-                       xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+                       xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+                       xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 
                        for (k = 0; k < max_order; k++) {
                                void *entry = xa_load(xa, (1UL << k) - 1);
                                if ((i < k) && (j < k))
                                        XA_BUG_ON(xa, entry != NULL);
                                else
-                                       XA_BUG_ON(xa, entry != xa_mk_value(j));
+                                       XA_BUG_ON(xa, entry != xa_mk_index(j));
                        }
 
                        xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
                check_multi_store_1(xa, (1UL << i) + 1, i);
        }
        check_multi_store_2(xa, 4095, 9);
+
+       for (i = 1; i < 20; i++) {
+               check_multi_store_3(xa, 0, i);
+               check_multi_store_3(xa, 1UL << i, i);
+       }
 #endif
 }
 
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
        xa_destroy(&xa0);
 
        id = 0xfffffffeU;
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != 0);
        XA_BUG_ON(&xa0, id != 0xfffffffeU);
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != 0);
        XA_BUG_ON(&xa0, id != 0xffffffffU);
-       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
                                GFP_KERNEL) != -ENOSPC);
        XA_BUG_ON(&xa0, id != 0xffffffffU);
        xa_destroy(&xa0);
+
+       id = 10;
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+                               GFP_KERNEL) != -ENOSPC);
+       XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
+       XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+                               GFP_KERNEL) != -ENOSPC);
+       xa_erase_index(&xa0, 3);
+       XA_BUG_ON(&xa0, !xa_empty(&xa0));
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
        xas_lock(&xas);
        xas_for_each_conflict(&xas, entry) {
                XA_BUG_ON(xa, !xa_is_value(entry));
-               XA_BUG_ON(xa, entry < xa_mk_value(start));
-               XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+               XA_BUG_ON(xa, entry < xa_mk_index(start));
+               XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
                count++;
        }
-       xas_store(&xas, xa_mk_value(start));
+       xas_store(&xas, xa_mk_index(start));
        xas_unlock(&xas);
        if (xas_nomem(&xas, GFP_KERNEL)) {
                count = 0;
@@ -622,9 +667,9 @@ retry:
        }
        XA_BUG_ON(xa, xas_error(&xas));
        XA_BUG_ON(xa, count != present);
-       XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+       XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
        XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
-                       xa_mk_value(start));
+                       xa_mk_index(start));
        xa_erase_index(xa, start);
 }
 
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
                for (j = 0; j < index; j++) {
                        XA_STATE(xas, xa, j + index);
                        xa_store_index(xa, index - 1, GFP_KERNEL);
-                       xa_store_order(xa, index, i, xa_mk_value(index),
+                       xa_store_order(xa, index, i, xa_mk_index(index),
                                        GFP_KERNEL);
                        rcu_read_lock();
                        xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
                j = 0;
                index = 0;
                xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
-                       XA_BUG_ON(xa, xa_mk_value(index) != entry);
+                       XA_BUG_ON(xa, xa_mk_index(index) != entry);
                        XA_BUG_ON(xa, index != j++);
                }
        }
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
        xa_destroy(xa);
 }
 
+static noinline void check_find_3(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+       unsigned long i, j, k;
+       void *entry;
+
+       for (i = 0; i < 100; i++) {
+               for (j = 0; j < 100; j++) {
+                       for (k = 0; k < 100; k++) {
+                               xas_set(&xas, j);
+                               xas_for_each_marked(&xas, entry, k, XA_MARK_0)
+                                       ;
+                               if (j > k)
+                                       XA_BUG_ON(xa,
+                                               xas.xa_node != XAS_RESTART);
+                       }
+               }
+               xa_store_index(xa, i, GFP_KERNEL);
+               xa_set_mark(xa, i, XA_MARK_0);
+       }
+       xa_destroy(xa);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
        check_find_1(xa);
        check_find_2(xa);
+       check_find_3(xa);
        check_multi_find(xa);
        check_multi_find_2(xa);
 }
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
                        for (index = 0; index < (1UL << (order + 5));
                             index += (1UL << order)) {
                                xa_store_order(xa, index, order,
-                                               xa_mk_value(index), GFP_KERNEL);
+                                               xa_mk_index(index), GFP_KERNEL);
                                XA_BUG_ON(xa, xa_load(xa, index) !=
-                                               xa_mk_value(index));
+                                               xa_mk_index(index));
                                XA_BUG_ON(xa, xa_find_entry(xa,
-                                               xa_mk_value(index)) != index);
+                                               xa_mk_index(index)) != index);
                        }
                        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
                        xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
        xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
        XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
-       XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+       XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
        xa_erase_index(xa, ULONG_MAX);
        XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
                        XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
                XA_BUG_ON(xa, xas.xa_index != i);
                if (i == 0 || i == idx)
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
        }
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
                        XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
                XA_BUG_ON(xa, xas.xa_index != i);
                if (i == 0 || i == idx)
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
        } while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
        do {
                void *entry = xas_prev(&xas);
                i--;
-               XA_BUG_ON(xa, entry != xa_mk_value(i));
+               XA_BUG_ON(xa, entry != xa_mk_index(i));
                XA_BUG_ON(xa, i != xas.xa_index);
        } while (i != 0);
 
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
 
        do {
                void *entry = xas_next(&xas);
-               XA_BUG_ON(xa, entry != xa_mk_value(i));
+               XA_BUG_ON(xa, entry != xa_mk_index(i));
                XA_BUG_ON(xa, i != xas.xa_index);
                i++;
        } while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
                void *entry = xas_prev(&xas);
                i--;
                if ((i < (1 << 8)) || (i >= (1 << 15)))
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
                XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
        do {
                void *entry = xas_next(&xas);
                if ((i < (1 << 8)) || (i >= (1 << 15)))
-                       XA_BUG_ON(xa, entry != xa_mk_value(i));
+                       XA_BUG_ON(xa, entry != xa_mk_index(i));
                else
                        XA_BUG_ON(xa, entry != NULL);
                XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < (1U << order); i++) {
-                       XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+                       XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
                        xas_next(&xas);
                }
 unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < (1UL << order); i++) {
-                       void *old = xas_store(&xas, xa_mk_value(base + i));
+                       void *old = xas_store(&xas, xa_mk_index(base + i));
                        if (xas.xa_index == index)
-                               XA_BUG_ON(xa, old != xa_mk_value(base + i));
+                               XA_BUG_ON(xa, old != xa_mk_index(base + i));
                        else
                                XA_BUG_ON(xa, old != NULL);
                        xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
                unsigned long last)
 {
 #ifdef CONFIG_XARRAY_MULTI
-       xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+       xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
 
-       XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
-       XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+       XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+       XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
        XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
        XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
 
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
                XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
                rcu_read_unlock();
 
-               xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+               xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
                                GFP_KERNEL);
                XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
 
index bbacca576593613f25cf107485f3cd8a89896d0b..5f3f9311de893a2975990060f5dfae6a6fb3d462 100644 (file)
@@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
-                       goto bounds;
+                       goto out;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
@@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
        }
 
 out:
-       if (!max)
+       if (xas->xa_index > max)
                goto max;
-bounds:
-       xas->xa_node = XAS_BOUNDS;
-       return NULL;
+       return set_bounds(xas);
 max:
        xas->xa_node = XAS_RESTART;
        return NULL;
index 705a3e9cc910e16472159a2d20f9585e1ff7c13d..a80832487981b456a6d9c287134abc329f4c48e3 100644 (file)
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;
 
-       set_page_private(page, 0);
-       page->mapping = NULL;
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
+
+       set_page_private(page, 0);
+       page->mapping = NULL;
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
index 9a2d5ae81ae1cf4217ed3174d72667be276769da..81ae63ca78d0bb8cb20d615c28f41795d57b2827 100644 (file)
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
        return -1;
 }
 
-bool __init memblock_is_reserved(phys_addr_t addr)
+bool __init_memblock memblock_is_reserved(phys_addr_t addr)
 {
        return memblock_search(&memblock.reserved, addr) != -1;
 }
index 921f80488bb3fdd03cc7ce64a5a5a6d0f015b794..5d07e0b1352f3f2c81081a0eae4fb772566d057c 100644 (file)
@@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
 {
        void *old;
 
-       xa_lock_irq(&mapping->i_pages);
-       old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
-       xa_unlock_irq(&mapping->i_pages);
+       old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
index 33307fc05c4d3372d5e4746116330532765e61d7..3abc8cc5020120a3bbeb7c3429d19472fc96d051 100644 (file)
@@ -239,6 +239,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
        }
 }
 
+/*
+ * Mark all memblocks as present using memory_present(). This is a
+ * convienence function that is useful for a number of arches
+ * to mark all of the systems memory as present during initialization.
+ */
+void __init memblocks_present(void)
+{
+       struct memblock_region *reg;
+
+       for_each_memblock(memory, reg) {
+               memory_present(memblock_get_region_node(reg),
+                              memblock_region_memory_base_pfn(reg),
+                              memblock_region_memory_end_pfn(reg));
+       }
+}
+
 /*
  * Subtle, we encode the real pfn into the mem_map such that
  * the identity pfn - section_mem_map will return the actual
index 3aab7664933fdc67770cdbc8b80a231b41dc2555..c70207537488ff3c4f6183a725cf6334d2d042eb 100644 (file)
@@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err < 0)
                goto free_skb;
 
-       sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sk->sk_tsflags);
 
        skb->dev = dev;
        skb->sk  = sk;
index 588f475019d47c9d6bae8883acebab48aaf63b48..af68207ee56c34324939e2a0ca0b0e6a87dee2fc 100644 (file)
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                /* Pass parameters to the BPF program */
                cb->qdisc_cb.flow_keys = &flow_keys;
                flow_keys.nhoff = nhoff;
+               flow_keys.thoff = nhoff;
 
                bpf_compute_data_pointers((struct sk_buff *)skb);
                result = BPF_PROG_RUN(attached, skb);
@@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
                /* Restore state */
                memcpy(cb, &cb_saved, sizeof(cb_saved));
 
+               flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
+               flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
+                                         flow_keys.nhoff, skb->len);
+
                __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
                                         target_container);
-               key_control->thoff = min_t(u16, key_control->thoff, skb->len);
                rcu_read_unlock();
                return result == BPF_OK;
        }
index 4b54e5f107c6a3a50031a6e19fb249a15086b892..acf45ddbe924e62c5a8382ac3f9b176dfe56ca7e 100644 (file)
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
        for_each_possible_cpu(i) {
                struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
+               napi_disable(&cell->napi);
                netif_napi_del(&cell->napi);
                __skb_queue_purge(&cell->napi_skbs);
        }
index 41954e42a2ded2ac1770c4b107505a5eee57a2be..5fa32c064bafdff8e83dff2bfe081e1fa0090229 100644 (file)
@@ -2494,11 +2494,16 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
 
                ndm = nlmsg_data(nlh);
                if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_ifindex ||
-                   ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+                   ndm->ndm_state || ndm->ndm_type) {
                        NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
                        return -EINVAL;
                }
 
+               if (ndm->ndm_flags & ~NTF_PROXY) {
+                       NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+                       return -EINVAL;
+               }
+
                err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
                                         NULL, extack);
        } else {
index 37b4667128a3808395e23b0c53325a5d937c6b54..d67ec17f2cc862852e892dd279522433dcbd73cd 100644 (file)
@@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 }
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+                                    void __user *buffer, size_t *lenp,
+                                    loff_t *ppos)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
 #endif
 
 static struct ctl_table net_core_table[] = {
@@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
        {
                .procname       = "bpf_jit_limit",
                .data           = &bpf_jit_limit,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(long),
                .mode           = 0600,
-               .proc_handler   = proc_dointvec_minmax_bpf_restricted,
-               .extra1         = &one,
+               .proc_handler   = proc_dolongvec_minmax_bpf_restricted,
+               .extra1         = &long_one,
+               .extra2         = &long_max,
        },
 #endif
        {
index a34602ae27dee2d3ff5d579172910c5f9f004292..608a6f4223fb964c487f3bd9c4e074f3f34167d7 100644 (file)
@@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
 {
        int rc = -1;    /* Something else, probably a multicast. */
 
-       if (ipv4_is_zeronet(addr))
+       if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
                rc = 0;
        else {
                __u32 haddr = ntohl(addr);
-
                if (IN_CLASSA(haddr))
                        rc = 8;
                else if (IN_CLASSB(haddr))
                        rc = 16;
                else if (IN_CLASSC(haddr))
                        rc = 24;
+               else if (IN_CLASSE(haddr))
+                       rc = 32;
        }
 
        return rc;
index 32662e9e5d218868341169bba1dc3ab430952c58..d5984d31ab931487fdea844aa4a240df00d2b64e 100644 (file)
@@ -72,6 +72,7 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
        if (unlikely(opt->optlen))
                ip_forward_options(skb);
 
+       skb->tstamp = 0;
        return dst_output(net, sk, skb);
 }
 
index aa0b22697998ab60f0013bf65cea9cef2913f61f..867be8f7f1fa03063c35310b1fd37a65e5c726fb 100644 (file)
@@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct rb_node **rbn, *parent;
        struct sk_buff *skb1, *prev_tail;
+       int ihl, end, skb1_run_end;
        struct net_device *dev;
        unsigned int fragsize;
        int flags, offset;
-       int ihl, end;
        int err = -ENOENT;
        u8 ecn;
 
@@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
         *   overlapping fragment, the entire datagram (and any constituent
         *   fragments) MUST be silently discarded.
         *
-        * We do the same here for IPv4 (and increment an snmp counter).
+        * We do the same here for IPv4 (and increment an snmp counter) but
+        * we do not want to drop the whole queue in response to a duplicate
+        * fragment.
         */
 
        err = -EINVAL;
@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                do {
                        parent = *rbn;
                        skb1 = rb_to_skb(parent);
+                       skb1_run_end = skb1->ip_defrag_offset +
+                                      FRAG_CB(skb1)->frag_run_len;
                        if (end <= skb1->ip_defrag_offset)
                                rbn = &parent->rb_left;
-                       else if (offset >= skb1->ip_defrag_offset +
-                                               FRAG_CB(skb1)->frag_run_len)
+                       else if (offset >= skb1_run_end)
                                rbn = &parent->rb_right;
-                       else /* Found an overlap with skb1. */
-                               goto overlap;
+                       else if (offset >= skb1->ip_defrag_offset &&
+                                end <= skb1_run_end)
+                               goto err; /* No new data, potential duplicate */
+                       else
+                               goto overlap; /* Found an overlap */
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
index 88212615bf4ce75f5e840297b1be70f4e4e0b83b..2393e5c106bfa490dc2f45c6fbb843532bffc404 100644 (file)
@@ -429,6 +429,8 @@ static int __init ic_defaults(void)
                        ic_netmask = htonl(IN_CLASSB_NET);
                else if (IN_CLASSC(ntohl(ic_myaddr)))
                        ic_netmask = htonl(IN_CLASSC_NET);
+               else if (IN_CLASSE(ntohl(ic_myaddr)))
+                       ic_netmask = htonl(IN_CLASSE_NET);
                else {
                        pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
                               &ic_myaddr);
index a6defbec4f1b4338993756ea7b8681012450aa1a..e7a3879cedd0a13610027fa88dfa2a8f715ba9f0 100644 (file)
@@ -69,6 +69,8 @@
 #include <net/nexthop.h>
 #include <net/switchdev.h>
 
+#include <linux/nospec.h>
+
 struct ipmr_rule {
        struct fib_rule         common;
 };
@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.vifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.vifi];
                if (VIF_EXISTS(mrt, vr.vifi)) {
index 8ca3eb06ba04246ce9f53045488f1da57dc8e689..169a652b3dd1a7c2d9d6401236f76f76e427562c 100644 (file)
@@ -391,7 +391,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->ip_summed = CHECKSUM_NONE;
 
-       sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
 
        if (flags & MSG_CONFIRM)
                skb_set_dst_pending_confirm(skb, 1);
index fcd3c66ded1620d0d320fd7b1753f2734d1dfa46..4591ca4bdbe8831f8183c4df0d4237d023b5bb68 100644 (file)
@@ -378,6 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
        __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 
+       skb->tstamp = 0;
        return dst_output(net, sk, skb);
 }
 
index b283f293ee4ae7537da0bde51b5a4695a2e6f249..caad40d6e74d5c12b5215b0acda1f78e53da7955 100644 (file)
@@ -15,7 +15,7 @@
 int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
 {
-       struct sockaddr_in6 udp6_addr;
+       struct sockaddr_in6 udp6_addr = {};
        int err;
        struct socket *sock = NULL;
 
@@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
                goto error;
 
        if (cfg->peer_udp_port) {
+               memset(&udp6_addr, 0, sizeof(udp6_addr));
                udp6_addr.sin6_family = AF_INET6;
                memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
                       sizeof(udp6_addr.sin6_addr));
index e2ea691e42c6d6efe4ddf845e01422bf31e9fc4e..377a2ee5d9ad8a6aec96a5fea7f63e6890399771 100644 (file)
@@ -52,6 +52,8 @@
 #include <net/ip6_checksum.h>
 #include <linux/netconf.h>
 
+#include <linux/nospec.h>
+
 struct ip6mr_rule {
        struct fib_rule         common;
 };
@@ -1841,6 +1843,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.mifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.mifi];
                if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1915,6 +1918,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
                        return -EFAULT;
                if (vr.mifi >= mrt->maxvif)
                        return -EINVAL;
+               vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
                read_lock(&mrt_lock);
                vif = &mrt->vif_table[vr.mifi];
                if (VIF_EXISTS(mrt, vr.mifi)) {
index 5e0efd3954e90ade89eb4da17cd5ecef1894a1a3..fc2b5e845fdf3e2f8ccafbccbbfc7465af0fa13b 100644 (file)
@@ -658,6 +658,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
+
        if (flags & MSG_CONFIRM)
                skb_set_dst_pending_confirm(skb, 1);
 
index 5f3c81e705c7df9ea7ff7c69eb3b6aff00df17ee..3a0171a65db320d5a42eb84a2fbcaff2c85a00d3 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (c) 2016        Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1951,6 +1952,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        WARN(local->open_count, "%s: open count remains %d\n",
             wiphy_name(local->hw.wiphy), local->open_count);
 
+       ieee80211_txq_teardown_flows(local);
+
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
                list_del(&sdata->list);
index 83e71e6b2ebe04f244b574fcdfe8b95f1a48ca00..7b8320d4a8e4bf42475277db8e8acf59ba7e3fa6 100644 (file)
@@ -1262,7 +1262,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        rtnl_unlock();
        ieee80211_led_exit(local);
        ieee80211_wep_free(local);
-       ieee80211_txq_teardown_flows(local);
  fail_flows:
        destroy_workqueue(local->workqueue);
  fail_workqueue:
@@ -1288,7 +1287,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
-       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
index a794ca7290001a778d7ef5fde62a7c32b7c110b1..3f0b96e1e02fa572bde737a0cc3f98967fe8ac0f 100644 (file)
@@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
        }
 
        ieee80211_led_tx(local);
+
+       if (skb_has_frag_list(skb)) {
+               kfree_skb_list(skb_shinfo(skb)->frag_list);
+               skb_shinfo(skb)->frag_list = NULL;
+       }
 }
 
 /*
index 4eef55da0878e299d0bb912fa7ea69d3d4e91441..8da228da53ae98e0befbcede2d3b3c8f394aefc2 100644 (file)
@@ -531,8 +531,8 @@ nla_put_failure:
                ret = -EMSGSIZE;
        } else {
                cb->args[IPSET_CB_ARG0] = i;
+               ipset_nest_end(skb, atd);
        }
-       ipset_nest_end(skb, atd);
 out:
        rcu_read_unlock();
        return ret;
index b6d0f6deea86c34437f997533df3f4f7f4f6bd50..9cd180bda0920304467165cef1365c7291575347 100644 (file)
@@ -427,7 +427,7 @@ insert_tree(struct net *net,
        count = 1;
        rbconn->list.count = count;
 
-       rb_link_node(&rbconn->node, parent, rbnode);
+       rb_link_node_rcu(&rbconn->node, parent, rbnode);
        rb_insert_color(&rbconn->node, root);
 out_unlock:
        spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
index a975efd6b8c3e3baac2a80334237be9201ace130..9da303461069cd9ea353ab9fa173b5b4322a665f 100644 (file)
@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
 /* TCP SACK sequence number adjustment */
 static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
                                      unsigned int protoff,
-                                     struct tcphdr *tcph,
                                      struct nf_conn *ct,
                                      enum ip_conntrack_info ctinfo)
 {
-       unsigned int dir, optoff, optend;
+       struct tcphdr *tcph = (void *)skb->data + protoff;
        struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+       unsigned int dir, optoff, optend;
 
        optoff = protoff + sizeof(struct tcphdr);
        optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
        if (!skb_make_writable(skb, optend))
                return 0;
 
+       tcph = (void *)skb->data + protoff;
        dir = CTINFO2DIR(ctinfo);
 
        while (optoff < optend) {
@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
                 ntohl(newack));
        tcph->ack_seq = newack;
 
-       res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+       res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
 out:
        spin_unlock_bh(&ct->lock);
 
index e2b196054dfc6dbe2eeedbee76a8ab342187e297..2268b10a9dcf844b8ce9d8d43c6b32e943553496 100644 (file)
@@ -117,7 +117,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
        dst = skb_dst(skb);
        if (dst->xfrm)
                dst = ((struct xfrm_dst *)dst)->route;
-       dst_hold(dst);
+       if (!dst_hold_safe(dst))
+               return -EHOSTUNREACH;
 
        if (sk && !net_eq(net, sock_net(sk)))
                sk = NULL;
index 2e61aab6ed731356e34df28a6c1c8d41659ad749..6e548d7c9f67bee955f887319bc3c26fdd0c5bf7 100644 (file)
@@ -1216,7 +1216,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
                        goto nla_put_failure;
 
-               if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+               if (rcu_access_pointer(basechain->stats) &&
+                   nft_dump_stats(skb, rcu_dereference(basechain->stats)))
                        goto nla_put_failure;
        }
 
@@ -1392,7 +1393,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        return newstats;
 }
 
-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+                                   struct nft_base_chain *chain,
                                    struct nft_stats __percpu *newstats)
 {
        struct nft_stats __percpu *oldstats;
@@ -1400,8 +1402,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
        if (newstats == NULL)
                return;
 
-       if (chain->stats) {
-               oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+       if (rcu_access_pointer(chain->stats)) {
+               oldstats = rcu_dereference_protected(chain->stats,
+                                       lockdep_commit_lock_is_held(net));
                rcu_assign_pointer(chain->stats, newstats);
                synchronize_rcu();
                free_percpu(oldstats);
@@ -1439,9 +1442,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
                struct nft_base_chain *basechain = nft_base_chain(chain);
 
                module_put(basechain->type->owner);
-               free_percpu(basechain->stats);
-               if (basechain->stats)
+               if (rcu_access_pointer(basechain->stats)) {
                        static_branch_dec(&nft_counters_enabled);
+                       free_percpu(rcu_dereference_raw(basechain->stats));
+               }
                kfree(chain->name);
                kfree(basechain);
        } else {
@@ -1590,7 +1594,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                                kfree(basechain);
                                return PTR_ERR(stats);
                        }
-                       basechain->stats = stats;
+                       rcu_assign_pointer(basechain->stats, stats);
                        static_branch_inc(&nft_counters_enabled);
                }
 
@@ -6180,7 +6184,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
                return;
 
        basechain = nft_base_chain(trans->ctx.chain);
-       nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+       nft_chain_stats_replace(trans->ctx.net, basechain,
+                               nft_trans_chain_stats(trans));
 
        switch (nft_trans_chain_policy(trans)) {
        case NF_DROP:
index 3fbce3b9c5ec0f51c9841aca36c0e5fc48b233c6..a50500232b0a9fb197b2a366a0c466d363867134 100644 (file)
@@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
        struct nft_stats *stats;
 
        base_chain = nft_base_chain(chain);
-       if (!base_chain->stats)
+       if (!rcu_access_pointer(base_chain->stats))
                return;
 
        local_bh_disable();
index 6bb9f3cde0b0d82af00a6b4f212be8c0d385dd47..3c023d6120f657b098639d7fbdef56776225b676 100644 (file)
@@ -1706,7 +1706,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                        nlk->flags &= ~NETLINK_F_EXT_ACK;
                err = 0;
                break;
-       case NETLINK_DUMP_STRICT_CHK:
+       case NETLINK_GET_STRICT_CHK:
                if (val)
                        nlk->flags |= NETLINK_F_STRICT_CHK;
                else
@@ -1806,7 +1806,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                        return -EFAULT;
                err = 0;
                break;
-       case NETLINK_DUMP_STRICT_CHK:
+       case NETLINK_GET_STRICT_CHK:
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
index a74650e98f423d752e3c49df1388e9c86cb5ee44..6655793765b2d255d051e8d892601c861c43cd72 100644 (file)
@@ -1965,7 +1965,7 @@ retry:
        skb->mark = sk->sk_mark;
        skb->tstamp = sockc.transmit_time;
 
-       sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc.tsflags);
 
        if (unlikely(extra_len == 4))
                skb->no_fcs = 1;
@@ -2460,7 +2460,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        skb->priority = po->sk.sk_priority;
        skb->mark = po->sk.sk_mark;
        skb->tstamp = sockc->transmit_time;
-       sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc->tsflags);
        skb_zcopy_set_nouarg(skb, ph.raw);
 
        skb_reserve(skb, hlen);
@@ -2898,7 +2898,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                goto out_free;
        }
 
-       sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+       skb_setup_tx_timestamp(skb, sockc.tsflags);
 
        if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
            !packet_extra_vlan_len_allowed(dev, skb)) {
index 4b00b1152a5f0a885100098e565b881ab68b7e57..f139420ba1f6837998567f9a7d1ac786147080cf 100644 (file)
@@ -308,16 +308,27 @@ out:
 /*
  * RDS ops use this to grab SG entries from the rm's sg pool.
  */
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+                                         int *ret)
 {
        struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
        struct scatterlist *sg_ret;
 
-       WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
-       WARN_ON(!nents);
+       if (WARN_ON(!ret))
+               return NULL;
 
-       if (rm->m_used_sgs + nents > rm->m_total_sgs)
+       if (nents <= 0) {
+               pr_warn("rds: alloc sgs failed! nents <= 0\n");
+               *ret = -EINVAL;
                return NULL;
+       }
+
+       if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+               pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+                       rm->m_total_sgs, rm->m_used_sgs, nents);
+               *ret = -ENOMEM;
+               return NULL;
+       }
 
        sg_ret = &sg_first[rm->m_used_sgs];
        sg_init_table(sg_ret, nents);
@@ -332,6 +343,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        unsigned int i;
        int num_sgs = ceil(total_len, PAGE_SIZE);
        int extra_bytes = num_sgs * sizeof(struct scatterlist);
+       int ret;
 
        rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
        if (!rm)
@@ -340,10 +352,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->data.op_nents = ceil(total_len, PAGE_SIZE);
-       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+       rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
        if (!rm->data.op_sg) {
                rds_message_put(rm);
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(ret);
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index 98237feb607ac6f3d24ecd18ecb3320c47065d30..182ab8430594a967533fe64b44ebe41a97fab83c 100644 (file)
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
        return tot_pages;
 }
 
-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+                       struct rds_iov_vector *iov)
 {
-       struct rds_iovec vec;
+       struct rds_iovec *vec;
        struct rds_iovec __user *local_vec;
        int tot_pages = 0;
        unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
        if (args->nr_local == 0)
                return -EINVAL;
 
+       iov->iov = kcalloc(args->nr_local,
+                          sizeof(struct rds_iovec),
+                          GFP_KERNEL);
+       if (!iov->iov)
+               return -ENOMEM;
+
+       vec = &iov->iov[0];
+
+       if (copy_from_user(vec, local_vec, args->nr_local *
+                          sizeof(struct rds_iovec)))
+               return -EFAULT;
+       iov->len = args->nr_local;
+
        /* figure out the number of pages in the vector */
-       for (i = 0; i < args->nr_local; i++) {
-               if (copy_from_user(&vec, &local_vec[i],
-                                  sizeof(struct rds_iovec)))
-                       return -EFAULT;
+       for (i = 0; i < args->nr_local; i++, vec++) {
 
-               nr_pages = rds_pages_in_vec(&vec);
+               nr_pages = rds_pages_in_vec(vec);
                if (nr_pages == 0)
                        return -EINVAL;
 
@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
  * Extract all arguments and set up the rdma_op
  */
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg)
+                      struct cmsghdr *cmsg,
+                      struct rds_iov_vector *vec)
 {
        struct rds_rdma_args *args;
        struct rm_rdma_op *op = &rm->rdma;
        int nr_pages;
        unsigned int nr_bytes;
        struct page **pages = NULL;
-       struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
-       int iov_size;
+       struct rds_iovec *iovs;
        unsigned int i, j;
        int ret = 0;
 
@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                goto out_ret;
        }
 
-       /* Check whether to allocate the iovec area */
-       iov_size = args->nr_local * sizeof(struct rds_iovec);
-       if (args->nr_local > UIO_FASTIOV) {
-               iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
-               if (!iovs) {
-                       ret = -ENOMEM;
-                       goto out_ret;
-               }
+       if (vec->len != args->nr_local) {
+               ret = -EINVAL;
+               goto out_ret;
        }
 
-       if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       iovs = vec->iov;
 
        nr_pages = rds_rdma_pages(iovs, args->nr_local);
        if (nr_pages < 0) {
                ret = -EINVAL;
-               goto out;
+               goto out_ret;
        }
 
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
-               goto out;
+               goto out_ret;
        }
 
        op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +623,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
        op->op_active = 1;
        op->op_recverr = rs->rs_recverr;
        WARN_ON(!nr_pages);
-       op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
-       if (!op->op_sg) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+       if (!op->op_sg)
+               goto out_pages;
 
        if (op->op_notify || op->op_recverr) {
                /* We allocate an uninitialized notifier here, because
@@ -635,7 +636,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->op_notifier) {
                        ret = -ENOMEM;
-                       goto out;
+                       goto out_pages;
                }
                op->op_notifier->n_user_token = args->user_token;
                op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +682,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                 */
                ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
                if (ret < 0)
-                       goto out;
+                       goto out_pages;
                else
                        ret = 0;
 
@@ -714,13 +715,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                                nr_bytes,
                                (unsigned int) args->remote_vec.bytes);
                ret = -EINVAL;
-               goto out;
+               goto out_pages;
        }
        op->op_bytes = nr_bytes;
 
-out:
-       if (iovs != iovstack)
-               sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
        kfree(pages);
 out_ret:
        if (ret)
@@ -838,11 +837,9 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
        rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
        rm->atomic.op_active = 1;
        rm->atomic.op_recverr = rs->rs_recverr;
-       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
-       if (!rm->atomic.op_sg) {
-               ret = -ENOMEM;
+       rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+       if (!rm->atomic.op_sg)
                goto err;
-       }
 
        /* verify 8 byte-aligned */
        if (args->local_addr & 0x7) {
index 6bfaf05b63b21efddec1b9fd03e80bc196276c3d..02ec4a3b2799018f7c99a09a24821fc8a72abb13 100644 (file)
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
        INIT_LIST_HEAD(&q->zcookie_head);
 }
 
+struct rds_iov_vector {
+       struct rds_iovec *iov;
+       int               len;
+};
+
+struct rds_iov_vector_arr {
+       struct rds_iov_vector *vec;
+       int                    len;
+       int                    indx;
+       int                    incr;
+};
+
 struct rds_message {
        refcount_t              m_refcount;
        struct list_head        m_sock_item;
@@ -827,7 +839,8 @@ rds_conn_connecting(struct rds_connection *conn)
 
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+                                         int *ret);
 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
                               bool zcopy);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +917,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
 int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+                       struct rds_iov_vector *iov);
 int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg);
 int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-                         struct cmsghdr *cmsg);
+                         struct cmsghdr *cmsg,
+                         struct rds_iov_vector *vec);
 int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                          struct cmsghdr *cmsg);
 void rds_rdma_free_op(struct rm_rdma_op *ro);
index fe785ee819ddb195db524124c854decf6937d23b..3d822bad7de9093c4673349b0c465406b89d4722 100644 (file)
@@ -876,13 +876,18 @@ out:
  * rds_message is getting to be quite complicated, and we'd like to allocate
  * it all in one go. This figures out how big it needs to be up front.
  */
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+                      struct rds_iov_vector_arr *vct)
 {
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;
        bool zcopy_cookie = false;
+       struct rds_iov_vector *iov, *tmp_iov;
+
+       if (num_sgs < 0)
+               return -EINVAL;
 
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
@@ -893,8 +898,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
 
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
+                       if (vct->indx >= vct->len) {
+                               vct->len += vct->incr;
+                               tmp_iov =
+                                       krealloc(vct->vec,
+                                                vct->len *
+                                                sizeof(struct rds_iov_vector),
+                                                GFP_KERNEL);
+                               if (!tmp_iov) {
+                                       vct->len -= vct->incr;
+                                       return -ENOMEM;
+                               }
+                               vct->vec = tmp_iov;
+                       }
+                       iov = &vct->vec[vct->indx];
+                       memset(iov, 0, sizeof(struct rds_iov_vector));
+                       vct->indx++;
                        cmsg_groups |= 1;
-                       retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+                       retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
                        if (retval < 0)
                                return retval;
                        size += retval;
@@ -951,10 +972,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
 }
 
 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
-                        struct msghdr *msg, int *allocated_mr)
+                        struct msghdr *msg, int *allocated_mr,
+                        struct rds_iov_vector_arr *vct)
 {
        struct cmsghdr *cmsg;
-       int ret = 0;
+       int ret = 0, ind = 0;
 
        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
@@ -968,7 +990,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
-                       ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+                       if (ind >= vct->indx)
+                               return -ENOMEM;
+                       ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+                       ind++;
                        break;
 
                case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1109,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                      sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
        int num_sgs = ceil(payload_len, PAGE_SIZE);
        int namelen;
+       struct rds_iov_vector_arr vct;
+       int ind;
+
+       memset(&vct, 0, sizeof(vct));
+
+       /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+       vct.incr = 1;
 
        /* Mirror Linux UDP mirror of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
@@ -1220,7 +1252,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
        }
        /* size of rm including all sgs */
-       ret = rds_rm_size(msg, num_sgs);
+       ret = rds_rm_size(msg, num_sgs, &vct);
        if (ret < 0)
                goto out;
 
@@ -1232,11 +1264,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* Attach data to the rm */
        if (payload_len) {
-               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-               if (!rm->data.op_sg) {
-                       ret = -ENOMEM;
+               rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+               if (!rm->data.op_sg)
                        goto out;
-               }
                ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
                if (ret)
                        goto out;
@@ -1270,7 +1300,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        rm->m_conn_path = cpath;
 
        /* Parse any control messages the user may have included. */
-       ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+       ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
        if (ret) {
                /* Trigger connection so that its ready for the next retry */
                if (ret ==  -EAGAIN)
@@ -1348,9 +1378,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        if (ret)
                goto out;
        rds_message_put(rm);
+
+       for (ind = 0; ind < vct.indx; ind++)
+               kfree(vct.vec[ind].iov);
+       kfree(vct.vec);
+
        return payload_len;
 
 out:
+       for (ind = 0; ind < vct.indx; ind++)
+               kfree(vct.vec[ind].iov);
+       kfree(vct.vec);
+
        /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
         * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
         * or in any other way, we need to destroy the MR again */
index 71312d7bd8f490c9b8200ccaac59ea0cd0031da6..208d940464d7b45665d7e6224775ff2f44544c27 100644 (file)
@@ -1258,10 +1258,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
        if (fold) {
-               if (!tc_skip_sw(fold->flags))
-                       rhashtable_remove_fast(&fold->mask->ht,
-                                              &fold->ht_node,
-                                              fold->mask->filter_ht_params);
+               rhashtable_remove_fast(&fold->mask->ht,
+                                      &fold->ht_node,
+                                      fold->mask->filter_ht_params);
                if (!tc_skip_hw(fold->flags))
                        fl_hw_destroy_filter(tp, fold, NULL);
        }
index fc6c5e4bffa540069f70cf33bda2942d7143fcd4..7f0539db56047919dbb79d32bafdfb513305b4a3 100644 (file)
@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
                if (addr) {
                        addr->a.v6.sin6_family = AF_INET6;
                        addr->a.v6.sin6_port = 0;
+                       addr->a.v6.sin6_flowinfo = 0;
                        addr->a.v6.sin6_addr = ifa->addr;
                        addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
                        addr->valid = 1;
index 5fbaf1901571cd2e41a65773ac2f3dc7fa80525d..82cb0e5634bc76ca4f3be1019e3f517591d74be3 100644 (file)
@@ -147,8 +147,14 @@ static int smc_release(struct socket *sock)
                sk->sk_shutdown |= SHUTDOWN_MASK;
        }
        if (smc->clcsock) {
+               if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+                       /* wake up clcsock accept */
+                       rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+               }
+               mutex_lock(&smc->clcsock_release_lock);
                sock_release(smc->clcsock);
                smc->clcsock = NULL;
+               mutex_unlock(&smc->clcsock_release_lock);
        }
        if (smc->use_fallback) {
                if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -205,6 +211,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        spin_lock_init(&smc->conn.send_lock);
        sk->sk_prot->hash(sk);
        sk_refcnt_debug_inc(sk);
+       mutex_init(&smc->clcsock_release_lock);
 
        return sk;
 }
@@ -821,7 +828,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        struct socket *new_clcsock = NULL;
        struct sock *lsk = &lsmc->sk;
        struct sock *new_sk;
-       int rc;
+       int rc = -EINVAL;
 
        release_sock(lsk);
        new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -834,7 +841,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        }
        *new_smc = smc_sk(new_sk);
 
-       rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+       mutex_lock(&lsmc->clcsock_release_lock);
+       if (lsmc->clcsock)
+               rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+       mutex_unlock(&lsmc->clcsock_release_lock);
        lock_sock(lsk);
        if  (rc < 0)
                lsk->sk_err = -rc;
index 08786ace6010028aae9f946e4b72378c3ad9181c..5721416d060534ff294be6db270e699fd942ba7f 100644 (file)
@@ -219,6 +219,10 @@ struct smc_sock {                          /* smc sock container */
                                                 * started, waiting for unsent
                                                 * data to be sent
                                                 */
+       struct mutex            clcsock_release_lock;
+                                               /* protects clcsock of a listen
+                                                * socket
+                                                * */
 };
 
 static inline struct smc_sock *smc_sk(const struct sock *sk)
index c6782aa475257bb510402a2172c8d4f55706a79a..24cbddc44c884ce6fa9a4ba51a7e85c30953edd2 100644 (file)
@@ -1952,6 +1952,7 @@ call_connect_status(struct rpc_task *task)
                /* retry with existing socket, after a delay */
                rpc_delay(task, 3*HZ);
                /* fall through */
+       case -ENOTCONN:
        case -EAGAIN:
                /* Check for timeouts before looping back to call_bind */
        case -ETIMEDOUT:
index ce927002862a675a9f1169d12fbeb6999984a1c6..73547d17d3c61a844c860c33acaff5259c81d54e 100644 (file)
@@ -67,7 +67,6 @@
  */
 static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
-static void    xprt_connect_status(struct rpc_task *task);
 static void     xprt_destroy(struct rpc_xprt *xprt);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
@@ -680,7 +679,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-       xprt_wake_pending_tasks(xprt, -EAGAIN);
+       else if (xprt->snd_task)
+               rpc_wake_up_queued_task_set_status(&xprt->pending,
+                               xprt->snd_task, -ENOTCONN);
        spin_unlock_bh(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -820,7 +821,7 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_connected(xprt)) {
                task->tk_timeout = task->tk_rqstp->rq_timeout;
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
-               rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+               rpc_sleep_on(&xprt->pending, task, NULL);
 
                if (test_bit(XPRT_CLOSING, &xprt->state))
                        return;
@@ -839,34 +840,6 @@ void xprt_connect(struct rpc_task *task)
        xprt_release_write(xprt, task);
 }
 
-static void xprt_connect_status(struct rpc_task *task)
-{
-       switch (task->tk_status) {
-       case 0:
-               dprintk("RPC: %5u xprt_connect_status: connection established\n",
-                               task->tk_pid);
-               break;
-       case -ECONNREFUSED:
-       case -ECONNRESET:
-       case -ECONNABORTED:
-       case -ENETUNREACH:
-       case -EHOSTUNREACH:
-       case -EPIPE:
-       case -EAGAIN:
-               dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
-               break;
-       case -ETIMEDOUT:
-               dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
-                               "out\n", task->tk_pid);
-               break;
-       default:
-               dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
-                               "server %s\n", task->tk_pid, -task->tk_status,
-                               task->tk_rqstp->rq_xprt->servername);
-               task->tk_status = -EIO;
-       }
-}
-
 enum xprt_xid_rb_cmp {
        XID_RB_EQUAL,
        XID_RB_LEFT,
index 8a5e823e0b339b1998ff21b9cac814de213a2b23..f0b3700cec95e8f0ccf9eac212b47158877066ab 100644 (file)
@@ -1217,6 +1217,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
        trace_rpc_socket_close(xprt, sock);
        sock_release(sock);
+
+       xprt_disconnect_done(xprt);
 }
 
 /**
@@ -1237,8 +1239,6 @@ static void xs_close(struct rpc_xprt *xprt)
 
        xs_reset_transport(transport);
        xprt->reestablish_timeout = 0;
-
-       xprt_disconnect_done(xprt);
 }
 
 static void xs_inject_disconnect(struct rpc_xprt *xprt)
@@ -1489,8 +1489,6 @@ static void xs_tcp_state_change(struct sock *sk)
                                        &transport->sock_state))
                        xprt_clear_connecting(xprt);
                clear_bit(XPRT_CLOSING, &xprt->state);
-               if (sk->sk_err)
-                       xprt_wake_pending_tasks(xprt, -sk->sk_err);
                /* Trigger the socket release */
                xs_tcp_force_close(xprt);
        }
@@ -2092,8 +2090,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
        trace_rpc_socket_connect(xprt, sock, 0);
        status = 0;
 out:
-       xprt_unlock_connect(xprt, transport);
        xprt_clear_connecting(xprt);
+       xprt_unlock_connect(xprt, transport);
        xprt_wake_pending_tasks(xprt, status);
 }
 
@@ -2329,8 +2327,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
        }
        status = -EAGAIN;
 out:
-       xprt_unlock_connect(xprt, transport);
        xprt_clear_connecting(xprt);
+       xprt_unlock_connect(xprt, transport);
        xprt_wake_pending_tasks(xprt, status);
 }
 
index b57b1be7252baef2f6410710e225864877c08ac1..8f34db2a97857bbdf0b3fc9bf33353a6e1740912 100644 (file)
@@ -880,7 +880,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_group *grp = tsk->group;
        struct net *net = sock_net(sk);
        struct tipc_member *mb = NULL;
        u32 node, port;
@@ -894,7 +893,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
        /* Block or return if destination link or member is congested */
        rc = tipc_wait_for_cond(sock, &timeout,
                                !tipc_dest_find(&tsk->cong_links, node, 0) &&
-                               !tipc_group_cong(grp, node, port, blks, &mb));
+                               tsk->group &&
+                               !tipc_group_cong(tsk->group, node, port, blks,
+                                                &mb));
        if (unlikely(rc))
                return rc;
 
@@ -924,7 +925,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
        struct tipc_sock *tsk = tipc_sk(sk);
        struct list_head *cong_links = &tsk->cong_links;
        int blks = tsk_blocks(GROUP_H_SIZE + dlen);
-       struct tipc_group *grp = tsk->group;
        struct tipc_msg *hdr = &tsk->phdr;
        struct tipc_member *first = NULL;
        struct tipc_member *mbr = NULL;
@@ -941,9 +941,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
        type = msg_nametype(hdr);
        inst = dest->addr.name.name.instance;
        scope = msg_lookup_scope(hdr);
-       exclude = tipc_group_exclude(grp);
 
        while (++lookups < 4) {
+               exclude = tipc_group_exclude(tsk->group);
+
                first = NULL;
 
                /* Look for a non-congested destination member, if any */
@@ -952,7 +953,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                                                 &dstcnt, exclude, false))
                                return -EHOSTUNREACH;
                        tipc_dest_pop(&dsts, &node, &port);
-                       cong = tipc_group_cong(grp, node, port, blks, &mbr);
+                       cong = tipc_group_cong(tsk->group, node, port, blks,
+                                              &mbr);
                        if (!cong)
                                break;
                        if (mbr == first)
@@ -971,7 +973,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
                /* Block or return if destination link or member is congested */
                rc = tipc_wait_for_cond(sock, &timeout,
                                        !tipc_dest_find(cong_links, node, 0) &&
-                                       !tipc_group_cong(grp, node, port,
+                                       tsk->group &&
+                                       !tipc_group_cong(tsk->group, node, port,
                                                         blks, &mbr));
                if (unlikely(rc))
                        return rc;
@@ -1006,8 +1009,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_group *grp = tsk->group;
-       struct tipc_nlist *dsts = tipc_group_dests(grp);
+       struct tipc_nlist *dsts;
        struct tipc_mc_method *method = &tsk->mc_method;
        bool ack = method->mandatory && method->rcast;
        int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1016,15 +1018,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        struct sk_buff_head pkts;
        int rc = -EHOSTUNREACH;
 
-       if (!dsts->local && !dsts->remote)
-               return -EHOSTUNREACH;
-
        /* Block or return if any destination link or member is congested */
-       rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
-                               !tipc_group_bc_cong(grp, blks));
+       rc = tipc_wait_for_cond(sock, &timeout,
+                               !tsk->cong_link_cnt && tsk->group &&
+                               !tipc_group_bc_cong(tsk->group, blks));
        if (unlikely(rc))
                return rc;
 
+       dsts = tipc_group_dests(tsk->group);
+       if (!dsts->local && !dsts->remote)
+               return -EHOSTUNREACH;
+
        /* Complete message header */
        if (dest) {
                msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1036,7 +1040,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
        msg_set_hdr_sz(hdr, GROUP_H_SIZE);
        msg_set_destport(hdr, 0);
        msg_set_destnode(hdr, 0);
-       msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+       msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
 
        /* Avoid getting stuck with repeated forced replicasts */
        msg_set_grp_bc_ack_req(hdr, ack);
@@ -2724,11 +2728,15 @@ void tipc_sk_reinit(struct net *net)
                rhashtable_walk_start(&iter);
 
                while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
+                       sock_hold(&tsk->sk);
+                       rhashtable_walk_stop(&iter);
+                       lock_sock(&tsk->sk);
                        msg = &tsk->phdr;
                        msg_set_prevnode(msg, tipc_own_addr(net));
                        msg_set_orignode(msg, tipc_own_addr(net));
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+                       release_sock(&tsk->sk);
+                       rhashtable_walk_start(&iter);
+                       sock_put(&tsk->sk);
                }
 
                rhashtable_walk_stop(&iter);
index 10dc59ce9c8221abbe0edce624d59bd0cf758d69..4d85d71f16e2abcee0f2abd84020f9b471f3e8fa 100644 (file)
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                }
 
                err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
-               if (err) {
-                       kfree_skb(_skb);
+               if (err)
                        goto out;
-               }
        }
        err = 0;
 out:
@@ -681,6 +679,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
        if (err)
                goto err;
 
+       if (remote.proto != local.proto) {
+               err = -EINVAL;
+               goto err;
+       }
+
        /* Checking remote ip address */
        rmcast = tipc_udp_is_mcast_addr(&remote);
 
index 311cec8e533dee755fc452977b4955e59893c13a..28887cf628b82321d2eab5bea38f9994d82d3fef 100644 (file)
@@ -56,7 +56,7 @@ enum {
 static struct proto *saved_tcpv6_prot;
 static DEFINE_MUTEX(tcpv6_prot_mutex);
 static LIST_HEAD(device_list);
-static DEFINE_MUTEX(device_mutex);
+static DEFINE_SPINLOCK(device_spinlock);
 static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
 static struct proto_ops tls_sw_proto_ops;
 
@@ -538,11 +538,14 @@ static struct tls_context *create_ctx(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
        if (!ctx)
                return NULL;
 
        icsk->icsk_ulp_data = ctx;
+       ctx->setsockopt = sk->sk_prot->setsockopt;
+       ctx->getsockopt = sk->sk_prot->getsockopt;
+       ctx->sk_proto_close = sk->sk_prot->close;
        return ctx;
 }
 
@@ -552,7 +555,7 @@ static int tls_hw_prot(struct sock *sk)
        struct tls_device *dev;
        int rc = 0;
 
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
                if (dev->feature && dev->feature(dev)) {
                        ctx = create_ctx(sk);
@@ -570,7 +573,7 @@ static int tls_hw_prot(struct sock *sk)
                }
        }
 out:
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
        return rc;
 }
 
@@ -579,12 +582,17 @@ static void tls_hw_unhash(struct sock *sk)
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_device *dev;
 
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
-               if (dev->unhash)
+               if (dev->unhash) {
+                       kref_get(&dev->kref);
+                       spin_unlock_bh(&device_spinlock);
                        dev->unhash(dev, sk);
+                       kref_put(&dev->kref, dev->release);
+                       spin_lock_bh(&device_spinlock);
+               }
        }
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
        ctx->unhash(sk);
 }
 
@@ -595,12 +603,17 @@ static int tls_hw_hash(struct sock *sk)
        int err;
 
        err = ctx->hash(sk);
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_for_each_entry(dev, &device_list, dev_list) {
-               if (dev->hash)
+               if (dev->hash) {
+                       kref_get(&dev->kref);
+                       spin_unlock_bh(&device_spinlock);
                        err |= dev->hash(dev, sk);
+                       kref_put(&dev->kref, dev->release);
+                       spin_lock_bh(&device_spinlock);
+               }
        }
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 
        if (err)
                tls_hw_unhash(sk);
@@ -675,9 +688,6 @@ static int tls_init(struct sock *sk)
                rc = -ENOMEM;
                goto out;
        }
-       ctx->setsockopt = sk->sk_prot->setsockopt;
-       ctx->getsockopt = sk->sk_prot->getsockopt;
-       ctx->sk_proto_close = sk->sk_prot->close;
 
        /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
        if (ip_ver == TLSV6 &&
@@ -699,17 +709,17 @@ out:
 
 void tls_register_device(struct tls_device *device)
 {
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_add_tail(&device->dev_list, &device_list);
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_register_device);
 
 void tls_unregister_device(struct tls_device *device)
 {
-       mutex_lock(&device_mutex);
+       spin_lock_bh(&device_spinlock);
        list_del(&device->dev_list);
-       mutex_unlock(&device_mutex);
+       spin_unlock_bh(&device_spinlock);
 }
 EXPORT_SYMBOL(tls_unregister_device);
 
index ab27a2872935774d41fb1f2c2f9341eb67c8cc0a..43a1dec08825fe824395851a2e81b8cf18f0f518 100644 (file)
 #include <linux/mutex.h>
 #include <linux/net.h>
 #include <linux/poll.h>
+#include <linux/random.h>
 #include <linux/skbuff.h>
 #include <linux/smp.h>
 #include <linux/socket.h>
@@ -504,9 +505,13 @@ out:
 static int __vsock_bind_stream(struct vsock_sock *vsk,
                               struct sockaddr_vm *addr)
 {
-       static u32 port = LAST_RESERVED_PORT + 1;
+       static u32 port = 0;
        struct sockaddr_vm new_addr;
 
+       if (!port)
+               port = LAST_RESERVED_PORT + 1 +
+                       prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
        vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
 
        if (addr->svm_port == VMADDR_PORT_ANY) {
index cb332adb84cdcadc006de6d7a8668111babd2f53..c361ce7824123da38ec613361a2ae5fa21197b7b 100644 (file)
@@ -263,6 +263,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
                                                 false);
 }
 
+static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+                                     struct sockaddr_vm *dst,
+                                     enum vmci_transport_packet_type type,
+                                     u64 size,
+                                     u64 mode,
+                                     struct vmci_transport_waiting_info *wait,
+                                     u16 proto,
+                                     struct vmci_handle handle)
+{
+       struct vmci_transport_packet *pkt;
+       int err;
+
+       pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+       if (!pkt)
+               return -ENOMEM;
+
+       err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+                                               mode, wait, proto, handle,
+                                               true);
+       kfree(pkt);
+
+       return err;
+}
+
 static int
 vmci_transport_send_control_pkt(struct sock *sk,
                                enum vmci_transport_packet_type type,
@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
                                u16 proto,
                                struct vmci_handle handle)
 {
-       struct vmci_transport_packet *pkt;
        struct vsock_sock *vsk;
-       int err;
 
        vsk = vsock_sk(sk);
 
@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
        if (!vsock_addr_bound(&vsk->remote_addr))
                return -EINVAL;
 
-       pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
-       if (!pkt)
-               return -ENOMEM;
-
-       err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
-                                               &vsk->remote_addr, type, size,
-                                               mode, wait, proto, handle,
-                                               true);
-       kfree(pkt);
-
-       return err;
+       return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+                                                    &vsk->remote_addr,
+                                                    type, size, mode,
+                                                    wait, proto, handle);
 }
 
 static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
 static int vmci_transport_send_reset(struct sock *sk,
                                     struct vmci_transport_packet *pkt)
 {
+       struct sockaddr_vm *dst_ptr;
+       struct sockaddr_vm dst;
+       struct vsock_sock *vsk;
+
        if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
                return 0;
-       return vmci_transport_send_control_pkt(sk,
-                                       VMCI_TRANSPORT_PACKET_TYPE_RST,
-                                       0, 0, NULL, VSOCK_PROTO_INVALID,
-                                       VMCI_INVALID_HANDLE);
+
+       vsk = vsock_sk(sk);
+
+       if (!vsock_addr_bound(&vsk->local_addr))
+               return -EINVAL;
+
+       if (vsock_addr_bound(&vsk->remote_addr)) {
+               dst_ptr = &vsk->remote_addr;
+       } else {
+               vsock_addr_init(&dst, pkt->dg.src.context,
+                               pkt->src_port);
+               dst_ptr = &dst;
+       }
+       return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+                                            VMCI_TRANSPORT_PACKET_TYPE_RST,
+                                            0, 0, NULL, VSOCK_PROTO_INVALID,
+                                            VMCI_INVALID_HANDLE);
 }
 
 static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
index 8d763725498c15fc7474f5ca78802233800ee4c5..2317727d64134ffe98f8930eb02359d69834d4ac 100644 (file)
@@ -8930,8 +8930,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
                int r = validate_pae_over_nl80211(rdev, info);
 
-               if (r < 0)
+               if (r < 0) {
+                       kzfree(connkeys);
                        return r;
+               }
 
                ibss.control_port_over_nl80211 = true;
        }
index 684c0bc01e2c12ce5ef61195030fbad2cd26fc02..d5635908587f4de80396831f24cdf7591522f511 100644 (file)
@@ -346,6 +346,12 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                skb->sp->xvec[skb->sp->len++] = x;
 
+               skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+                       goto drop;
+               }
+
 lock:
                spin_lock(&x->lock);
 
@@ -385,7 +391,6 @@ lock:
                XFRM_SKB_CB(skb)->seq.input.low = seq;
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
 
-               skb_dst_force(skb);
                dev_hold(skb->dev);
 
                if (crypto_done)
index 4ae87c5ce2e357b420aaa102bdbb416e7039f90a..fef6b2da3c5d7fadd06f466f9ee1cdf3909a5809 100644 (file)
@@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       err = -EHOSTUNREACH;
                        goto error_nolock;
                }
 
index dc4a9f1fb941a8eef7f1a3b68563c17abcf4d919..23c92891758a829e06dd776baf7ca340dec67b9f 100644 (file)
@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
        module_put(mode->owner);
 }
 
+void xfrm_state_free(struct xfrm_state *x)
+{
+       kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
 static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
        tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
        }
        xfrm_dev_state_free(x);
        security_xfrm_state_free(x);
-       kmem_cache_free(xfrm_state_cache, x);
+       xfrm_state_free(x);
 }
 
 static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 {
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        si->sadcnt = net->xfrm.state_num;
-       si->sadhcnt = net->xfrm.state_hmask;
+       si->sadhcnt = net->xfrm.state_hmask + 1;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 }
index c9a84e22f5d578216cd59687e293ed3a078cd565..277c1c46fe94e17db85f20a4abe3bacae9a1f1b0 100644 (file)
@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        }
 
-       kfree(x);
+       xfrm_state_free(x);
        kfree(xp);
 
        return 0;
 
 free_state:
-       kfree(x);
+       xfrm_state_free(x);
 nomem:
        return err;
 }
index 8081b6cf67d2120f862a2926ac58b33b2e7af453..34414c6efad6398b68f7e2c331f9e6185469c5c0 100755 (executable)
@@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
        $xs     = "[0-9a-f ]";  # hex character or space
        $funcre = qr/^$x* <(.*)>:$/;
        if ($arch eq 'aarch64') {
-               #ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp,#-80]!
-               $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+               #ffffffc0006325cc:       a9bb7bfd        stp     x29, x30, [sp, #-80]!
+               $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
        } elsif ($arch eq 'arm') {
                #c0008ffc:      e24dd064        sub     sp, sp, #100    ; 0x64
                $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
index 5056fb3b897d0094e182bba4fca08ae491dab7cc..e559c6294c39aef2819637125520a0721b14a95d 100755 (executable)
@@ -168,6 +168,7 @@ class id_parser(object):
         self.curline = 0
         try:
             for line in fd:
+                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                 self.curline += 1
                 if self.curline > maxlines:
                     break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
 
     try:
         if len(args.path) and args.path[0] == '-':
-            parser.parse_lines(sys.stdin, args.maxlines, '-')
+            stdin = os.fdopen(sys.stdin.fileno(), 'rb')
+            parser.parse_lines(stdin, args.maxlines, '-')
         else:
             if args.path:
                 for p in args.path:
                     if os.path.isfile(p):
-                        parser.parse_lines(open(p), args.maxlines, p)
+                        parser.parse_lines(open(p, 'rb'), args.maxlines, p)
                     elif os.path.isdir(p):
                         scan_git_subtree(repo.head.reference.commit.tree, p)
                     else:
index 8c9499867c918bb163946679f0e0d7f57fad5dfa..7489cb7de6dc9ff5c1caaa83c1574a8a713ecfd5 100644 (file)
@@ -580,9 +580,9 @@ void ima_update_policy(void)
        ima_update_policy_flag();
 }
 
+/* Keep the enumeration in sync with the policy_tokens! */
 enum {
-       Opt_err = -1,
-       Opt_measure = 1, Opt_dont_measure,
+       Opt_measure, Opt_dont_measure,
        Opt_appraise, Opt_dont_appraise,
        Opt_audit, Opt_hash, Opt_dont_hash,
        Opt_obj_user, Opt_obj_role, Opt_obj_type,
@@ -592,10 +592,10 @@ enum {
        Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
        Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
        Opt_appraise_type, Opt_permit_directio,
-       Opt_pcr
+       Opt_pcr, Opt_err
 };
 
-static match_table_t policy_tokens = {
+static const match_table_t policy_tokens = {
        {Opt_measure, "measure"},
        {Opt_dont_measure, "dont_measure"},
        {Opt_appraise, "appraise"},
@@ -1103,7 +1103,7 @@ void ima_policy_stop(struct seq_file *m, void *v)
 {
 }
 
-#define pt(token)      policy_tokens[token + Opt_err].pattern
+#define pt(token)      policy_tokens[token].pattern
 #define mt(token)      mask_tokens[token]
 
 /*
index 783978842f13a78805718015f1442bc61ca1b9ca..70e65a2ff2073a5d65cd20fb07b5accce4684955 100644 (file)
@@ -25,7 +25,7 @@ static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
 }
 
 enum {
-       Opt_err = -1,
+       Opt_err,
        Opt_enc,                /* "enc=<encoding>" eg. "enc=oaep" */
        Opt_hash,               /* "hash=<digest-name>" eg. "hash=sha1" */
 };
index ff6789365a12fb15ed91e160ba2936262ec5c29c..697bfc6c819236c39b828fc786cd6d08ff7bd592 100644 (file)
@@ -711,7 +711,7 @@ static int key_unseal(struct trusted_key_payload *p,
 }
 
 enum {
-       Opt_err = -1,
+       Opt_err,
        Opt_new, Opt_load, Opt_update,
        Opt_keyhandle, Opt_keyauth, Opt_blobauth,
        Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
index 64c3cb0fb926ff8b9a51b21ec4d81cca464d3583..654a50319198e4ec59c5acf85e36816783f3f8df 100644 (file)
@@ -30,7 +30,7 @@ static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
        int err;
 
        err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
-                                FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
+                                FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
        if (err < 0)
                return err;
        data = le32_to_cpu(reg);
index 8d75597028eebbb9f32e6de7009f101b8c117f4d..15021c83937284ff1eab5dd2f7b59f736a35626c 100644 (file)
@@ -5520,6 +5520,9 @@ enum {
        ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
        ALC295_FIXUP_HP_AUTO_MUTE,
        ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+       ALC294_FIXUP_ASUS_MIC,
+       ALC294_FIXUP_ASUS_HEADSET_MIC,
+       ALC294_FIXUP_ASUS_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6392,6 +6395,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_invalidate_dacs,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_THINKPAD_ACPI
        },
        [ALC295_FIXUP_HP_AUTO_MUTE] = {
                .type = HDA_FIXUP_FUNC,
@@ -6406,6 +6411,36 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
+       [ALC294_FIXUP_ASUS_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x13, 0x90a60160 }, /* use as internal mic */
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
+       [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
+       [ALC294_FIXUP_ASUS_SPK] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Set EAPD high */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6548,6 +6583,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+       SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7155,6 +7191,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x13, 0x90a60140}),
+       SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+               {0x14, 0x90170110},
+               {0x1b, 0x90a70130},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC295_STANDARD_PINS,
                {0x17, 0x21014020},
@@ -7227,6 +7271,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       int i, val;
+
+       if (!hp_pin)
+               return;
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       msleep(100);
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+       /* Wait for depop procedure finish  */
+       val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       for (i = 0; i < 20 && val & 0x0080; i++) {
+               msleep(50);
+               val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       }
+       /* Set HP depop to auto mode */
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+       msleep(50);
+}
+
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7352,6 +7427,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC294;
                spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+               alc294_hp_init(codec);
                break;
        case 0x10ec0300:
                spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7363,6 +7439,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+               alc294_hp_init(codec);
                break;
 
        }
index 486ed1f0c0bc17f48dca895ebf9581aa7d69278d..0a4d73317759c9ee523d7d5482548b3171c821e6 100644 (file)
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
-#define NETLINK_DUMP_STRICT_CHK                12
+#define NETLINK_GET_STRICT_CHK         12
 
 struct nl_pktinfo {
        __u32   group;
index acf1afa01c5b9ce26e55655e8957ae14794e9c83..397d6b612502de8c13b6f4862e5b642d25f37ee1 100644 (file)
@@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu
 TARGETS = main idr-test multiorder xarray
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
+        regression4.o \
         tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
 
 ifndef SHIFT
index 77a44c54998f0ef908a80e4540c188b02afe18b4..7a22d6e3732e5be22a3dfce09ee0071dff0babc8 100644 (file)
@@ -308,6 +308,7 @@ int main(int argc, char **argv)
        regression1_test();
        regression2_test();
        regression3_test();
+       regression4_test();
        iteration_test(0, 10 + 90 * long_run);
        iteration_test(7, 10 + 90 * long_run);
        single_thread_tests(long_run);
index 3c8a1584e9ee4c84be01b3ecda5cfbcbccd24fd3..135145af18b7c17b7943b89139506ab142702aad 100644 (file)
@@ -5,5 +5,6 @@
 void regression1_test(void);
 void regression2_test(void);
 void regression3_test(void);
+void regression4_test(void);
 
 #endif
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c
new file mode 100644 (file)
index 0000000..cf4e5ab
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "regression.h"
+
+static pthread_barrier_t worker_barrier;
+static int obj0, obj1;
+static RADIX_TREE(mt_tree, GFP_KERNEL);
+
+static void *reader_fn(void *arg)
+{
+       int i;
+       void *entry;
+
+       rcu_register_thread();
+       pthread_barrier_wait(&worker_barrier);
+
+       for (i = 0; i < 1000000; i++) {
+               rcu_read_lock();
+               entry = radix_tree_lookup(&mt_tree, 0);
+               rcu_read_unlock();
+               if (entry != &obj0) {
+                       printf("iteration %d bad entry = %p\n", i, entry);
+                       abort();
+               }
+       }
+
+       rcu_unregister_thread();
+
+       return NULL;
+}
+
+static void *writer_fn(void *arg)
+{
+       int i;
+
+       rcu_register_thread();
+       pthread_barrier_wait(&worker_barrier);
+
+       for (i = 0; i < 1000000; i++) {
+               radix_tree_insert(&mt_tree, 1, &obj1);
+               radix_tree_delete(&mt_tree, 1);
+       }
+
+       rcu_unregister_thread();
+
+       return NULL;
+}
+
+void regression4_test(void)
+{
+       pthread_t reader, writer;
+
+       printv(1, "regression test 4 starting\n");
+
+       radix_tree_insert(&mt_tree, 0, &obj0);
+       pthread_barrier_init(&worker_barrier, NULL, 2);
+
+       if (pthread_create(&reader, NULL, reader_fn, NULL) ||
+           pthread_create(&writer, NULL, writer_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+
+       if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) {
+               perror("pthread_join");
+               exit(1);
+       }
+
+       printv(1, "regression test 4 passed\n");
+}
index 107350a7821d09afe1e78721ae2aef06479b5fad..df9d32fd205538429180f05ef7828dca589875a5 100644 (file)
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 {
        void *data_end = (void *)(long)skb->data_end;
        void *data = (void *)(long)skb->data;
-       __u16 nhoff = skb->flow_keys->nhoff;
+       __u16 thoff = skb->flow_keys->thoff;
        __u8 *hdr;
 
        /* Verifies this variable offset does not overflow */
-       if (nhoff > (USHRT_MAX - hdr_size))
+       if (thoff > (USHRT_MAX - hdr_size))
                return NULL;
 
-       hdr = data + nhoff;
+       hdr = data + thoff;
        if (hdr + hdr_size <= data_end)
                return hdr;
 
-       if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+       if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
                return NULL;
 
        return buffer;
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                        /* Only inspect standard GRE packets with version 0 */
                        return BPF_OK;
 
-               keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+               keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
                if (GRE_IS_CSUM(gre->flags))
-                       keys->nhoff += 4; /* Step over chksum and Padding */
+                       keys->thoff += 4; /* Step over chksum and Padding */
                if (GRE_IS_KEY(gre->flags))
-                       keys->nhoff += 4; /* Step over key */
+                       keys->thoff += 4; /* Step over key */
                if (GRE_IS_SEQ(gre->flags))
-                       keys->nhoff += 4; /* Step over sequence number */
+                       keys->thoff += 4; /* Step over sequence number */
 
                keys->is_encap = true;
 
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                        if (!eth)
                                return BPF_DROP;
 
-                       keys->nhoff += sizeof(*eth);
+                       keys->thoff += sizeof(*eth);
 
                        return parse_eth_proto(skb, eth->h_proto);
                } else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
                        return BPF_DROP;
 
-               keys->thoff = keys->nhoff;
                keys->sport = tcp->source;
                keys->dport = tcp->dest;
                return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
                if (!udp)
                        return BPF_DROP;
 
-               keys->thoff = keys->nhoff;
                keys->sport = udp->source;
                keys->dport = udp->dest;
                return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
        keys->ipv4_src = iph->saddr;
        keys->ipv4_dst = iph->daddr;
 
-       keys->nhoff += iph->ihl << 2;
-       if (data + keys->nhoff > data_end)
+       keys->thoff += iph->ihl << 2;
+       if (data + keys->thoff > data_end)
                return BPF_DROP;
 
        if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
        keys->addr_proto = ETH_P_IPV6;
        memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
 
-       keys->nhoff += sizeof(struct ipv6hdr);
+       keys->thoff += sizeof(struct ipv6hdr);
 
        return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
        /* hlen is in 8-octets and does not include the first 8 bytes
         * of the header
         */
-       skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+       skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
 
        return parse_ipv6_proto(skb, ip6h->nexthdr);
 }
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
        if (!fragh)
                return BPF_DROP;
 
-       keys->nhoff += sizeof(*fragh);
+       keys->thoff += sizeof(*fragh);
        keys->is_frag = true;
        if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
                keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
        __be16 proto;
 
        /* Peek back to see if single or double-tagging */
-       if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+       if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
                               sizeof(proto)))
                return BPF_DROP;
 
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
                if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
                        return BPF_DROP;
 
-               keys->nhoff += sizeof(*vlan);
+               keys->thoff += sizeof(*vlan);
        }
 
        vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
        if (!vlan)
                return BPF_DROP;
 
-       keys->nhoff += sizeof(*vlan);
+       keys->thoff += sizeof(*vlan);
        /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
        if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
            vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
index df6f751cc1e81c0bcc1f68e86a5bc57e530a2452..f8eac4a544f450b4ae8acbfa7d57ab1b55675f97 100644 (file)
@@ -13915,6 +13915,34 @@ static struct bpf_test tests[] = {
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
+       {
+               "calls: cross frame pruning",
+               .insns = {
+                       /* r8 = !!random();
+                        * call pruner()
+                        * if (r8)
+                        *     do something bad;
+                        */
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_get_prandom_u32),
+                       BPF_MOV64_IMM(BPF_REG_8, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_8, 1),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .errstr = "!read_ok",
+               .result = REJECT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -13940,7 +13968,7 @@ static int create_map(uint32_t type, uint32_t size_key,
        return fd;
 }
 
-static int create_prog_dummy1(enum bpf_map_type prog_type)
+static int create_prog_dummy1(enum bpf_prog_type prog_type)
 {
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_0, 42),
@@ -13951,7 +13979,7 @@ static int create_prog_dummy1(enum bpf_map_type prog_type)
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
+static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
 {
        struct bpf_insn prog[] = {
                BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -13966,7 +13994,7 @@ static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
                                ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 }
 
-static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
                             int p1key)
 {
        int p2key = 1;
@@ -14037,7 +14065,7 @@ static int create_cgroup_storage(bool percpu)
 
 static char bpf_vlog[UINT_MAX >> 8];
 
-static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                          struct bpf_insn *prog, int *map_fds)
 {
        int *fixup_map_hash_8b = test->fixup_map_hash_8b;
@@ -14166,7 +14194,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
                do {
                        prog[*fixup_map_stacktrace].imm = map_fds[12];
                        fixup_map_stacktrace++;
-               } while (fixup_map_stacktrace);
+               } while (*fixup_map_stacktrace);
        }
 }
 
index 256d82d5fa8751aa400a1eef4f22e5c60b0f4dea..923570a9708ae730909920e321ee2880af0f8a8f 100644 (file)
@@ -7,6 +7,7 @@ CFLAGS += -I../../../../usr/include/
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
 TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
 TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
+TEST_PROGS += test_vxlan_fdb_changelink.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
new file mode 100755 (executable)
index 0000000..2d442cd
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Check FDB default-remote handling across "ip link set".
+
+check_remotes()
+{
+       local what=$1; shift
+       local N=$(bridge fdb sh dev vx | grep 00:00:00:00:00:00 | wc -l)
+
+       echo -ne "expected two remotes after $what\t"
+       if [[ $N != 2 ]]; then
+               echo "[FAIL]"
+               EXIT_STATUS=1
+       else
+               echo "[ OK ]"
+       fi
+}
+
+ip link add name vx up type vxlan id 2000 dstport 4789
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
+check_remotes "fdb append"
+
+ip link set dev vx type vxlan remote 192.0.2.30
+check_remotes "link set"
+
+ip link del dev vx
+exit $EXIT_STATUS
index e1473234968de7d0166d7f4446b5f6c796d5204e..c9a2abf8be1b38d0ea3d0ee14019b6078ca95af2 100644 (file)
@@ -2731,9 +2731,14 @@ TEST(syscall_restart)
        ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
        ASSERT_EQ(true, WIFSTOPPED(status));
        ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
-       /* Verify signal delivery came from parent now. */
        ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
-       EXPECT_EQ(getpid(), info.si_pid);
+       /*
+        * There is no siginfo on SIGSTOP any more, so we can't verify
+        * signal delivery came from parent now (getpid() == info.si_pid).
+        * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
+        * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
+        */
+       EXPECT_EQ(SIGSTOP, info.si_signo);
 
        /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
        ASSERT_EQ(0, kill(child_pid, SIGCONT));
index fb22bccfbc8a7f2df651474211b48aa67efdf8f9..7ef45a4a3cba72dea65d6c9ff9b7779710d20de5 100644 (file)
 #define PAGE_MASK (~(PAGE_SIZE-1))
 #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
 
+/* generic data direction definitions */
+#define READ                    0
+#define WRITE                   1
+
 typedef unsigned long long phys_addr_t;
 typedef unsigned long long dma_addr_t;
 typedef size_t __kernel_size_t;
index 3710342cf6ad01f7c495ff27ddc2c83feb643dbb..6855cce3e528793fd47b90f5f9811ea5617c693a 100644 (file)
@@ -175,10 +175,14 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 {
        struct kvm_coalesced_mmio_dev *dev, *tmp;
 
+       if (zone->pio != 1 && zone->pio != 0)
+               return -EINVAL;
+
        mutex_lock(&kvm->slots_lock);
 
        list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
-               if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+               if (zone->pio == dev->zone.pio &&
+                   coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
                        kvm_io_bus_unregister_dev(kvm,
                                zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
                        kvm_iodevice_destructor(&dev->dev);