Merge branch 'drm-fixes-5.0' of git://people.freedesktop.org/~agd5f/linux into drm...
author Dave Airlie <airlied@redhat.com>
Fri, 15 Feb 2019 01:46:40 +0000 (11:46 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 15 Feb 2019 01:46:51 +0000 (11:46 +1000)
amdgpu:
- Vega20 psp fix
- Add vrr range to debugfs for freesync debugging

sched:
- Scheduler race fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190213202958.3336-1-alexander.deucher@amd.com
348 files changed:
Documentation/ABI/stable/sysfs-driver-mlxreg-io
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-shc.dts
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/imx6q-pistachio.dts
arch/arm/boot/dts/imx6sll-evk.dts
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/meson.dtsi
arch/arm/boot/dts/meson8b-ec100.dts
arch/arm/boot/dts/meson8b-odroidc1.dts
arch/arm/boot/dts/meson8m2-mxiii-plus.dts
arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap5-l4.dtsi
arch/arm/boot/dts/r8a7743.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
arch/arm/boot/dts/vf610-bk4.dts
arch/arm/mach-iop32x/n2100.c
arch/arm/mach-tango/pm.c
arch/arm/mach-tango/pm.h [new file with mode: 0644]
arch/arm/mach-tango/setup.c
arch/arm/plat-pxa/ssp.c
arch/arm/xen/mm.c
arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/renesas/r8a774a1.dtsi
arch/arm64/boot/dts/renesas/r8a7796.dtsi
arch/arm64/boot/dts/renesas/r8a77965.dtsi
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/mm/dump.c
arch/m68k/emu/nfblock.c
arch/mips/Kconfig
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/boot/dts/ingenic/jz4740.dtsi
arch/mips/boot/dts/xilfpga/nexys4ddr.dts
arch/mips/include/asm/atomic.h
arch/mips/include/asm/barrier.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/pgtable.h
arch/mips/kernel/mips-cm.c
arch/mips/kernel/process.c
arch/mips/loongson64/Platform
arch/mips/loongson64/common/reset.c
arch/mips/mm/tlbex.c
arch/mips/pci/pci-octeon.c
arch/mips/vdso/Makefile
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/x86/boot/compressed/head_64.S
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/pgtable.h
arch/x86/kernel/cpu/mce/core.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c
arch/x86/mm/pageattr.c
block/blk-iolatency.c
block/blk-mq-debugfs.c
block/blk-mq.h
drivers/acpi/bus.c
drivers/android/binder.c
drivers/android/binder_internal.h
drivers/android/binderfs.c
drivers/ata/libata-core.c
drivers/base/cacheinfo.c
drivers/dma/at_xdmac.c
drivers/dma/bcm2835-dma.c
drivers/dma/dmatest.c
drivers/dma/imx-dma.c
drivers/firmware/arm_scmi/bus.c
drivers/fpga/stratix10-soc.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/rockchip/rockchip_rgb.c
drivers/gpu/drm/rockchip/rockchip_rgb.h
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/vkms/vkms_crc.c
drivers/gpu/drm/vkms/vkms_crtc.c
drivers/gpu/drm/vkms/vkms_drv.c
drivers/gpu/drm/vkms/vkms_drv.h
drivers/gpu/drm/vkms/vkms_gem.c
drivers/gpu/drm/vkms/vkms_output.c
drivers/gpu/drm/vkms/vkms_plane.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/hid/hid-debug.c
drivers/i2c/busses/i2c-omap.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/ti-ads8688.c
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mmp.c
drivers/isdn/mISDN/timerdev.c
drivers/md/dm-rq.c
drivers/md/dm.c
drivers/misc/mei/client.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/mic/vop/vop_main.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_bbt.c
drivers/mtd/nand/spi/core.c
drivers/net/dsa/b53/b53_srab.c
drivers/net/dsa/mv88e6xxx/global1_atu.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/freescale/fec_mpc52xx.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/sunbmac.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/fddi/defxx.c
drivers/net/geneve.c
drivers/net/ieee802154/mcr20a.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/dp83640.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/virtio_net.c
drivers/net/wan/dscc4.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/quirks.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/mediatek/Kconfig
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pinctrl/sunxi/pinctrl-sunxi.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/cxlflash/main.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/sd_zbc.c
drivers/soc/fsl/qbman/qman.c
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/speakup/spk_ttyio.c
drivers/target/target_core_configfs.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musbhsdma.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-am335x.c
drivers/usb/typec/tcpm/tcpm.c
drivers/virtio/virtio_ring.c
fs/aio.c
fs/buffer.c
fs/debugfs/inode.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/nfsd/vfs.c
fs/xfs/scrub/repair.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/dt-bindings/clock/imx8mq-clock.h
include/linux/blktrace_api.h
include/linux/filter.h
include/linux/hid-debug.h
include/linux/irqchip/arm-gic-v3.h
include/linux/netdevice.h
include/linux/signal.h
include/linux/stmmac.h
include/net/l3mdev.h
include/net/netfilter/nf_tables.h
include/sound/compress_driver.h
include/sound/hda_codec.h
include/uapi/linux/virtio_config.h
include/uapi/linux/virtio_ring.h
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/hashtab.c
kernel/bpf/percpu_freelist.c
kernel/bpf/percpu_freelist.h
kernel/bpf/syscall.c
kernel/events/ring_buffer.c
kernel/futex.c
kernel/locking/rtmutex.c
kernel/relay.c
kernel/signal.c
kernel/trace/bpf_trace.c
kernel/trace/trace_uprobe.c
lib/test_rhashtable.c
net/batman-adv/bat_v_elp.c
net/batman-adv/hard-interface.c
net/batman-adv/soft-interface.c
net/core/filter.c
net/core/skmsg.c
net/dccp/ccid.h
net/dsa/master.c
net/dsa/slave.c
net/ipv4/ip_gre.c
net/ipv6/ip6_gre.c
net/ipv6/netfilter.c
net/ipv6/seg6_iptunnel.c
net/ipv6/sit.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/tx.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_compat.c
net/netfilter/nft_dynset.c
net/netfilter/nft_immediate.c
net/netfilter/nft_lookup.c
net/netfilter/nft_objref.c
net/rds/bind.c
net/rxrpc/recvmsg.c
net/sched/cls_flower.c
net/sctp/socket.c
net/sctp/stream.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/smc/smc_cdc.h
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ib.c
net/smc/smc_llc.c
net/smc/smc_pnet.c
net/smc/smc_tx.c
net/smc/smc_wr.c
net/smc/smc_wr.h
net/socket.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/vmw_vsock/virtio_transport.c
net/wireless/ap.c
net/wireless/core.h
net/wireless/sme.c
samples/mei/mei-amt-version.c
sound/pci/hda/hda_bind.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/bpf/bpftool/common.c
tools/bpf/bpftool/map.c
tools/bpf/bpftool/prog.c
tools/iio/iio_generic_buffer.c
tools/include/uapi/linux/in.h
tools/perf/Documentation/perf-c2c.txt
tools/perf/Documentation/perf-mem.txt
tools/perf/arch/powerpc/util/Build
tools/perf/arch/powerpc/util/mem-events.c [new file with mode: 0644]
tools/perf/builtin-trace.c
tools/perf/tests/attr.py
tools/perf/tests/evsel-tp-sched.c
tools/perf/util/c++/clang.cpp
tools/perf/util/mem-events.c
tools/perf/util/symbol-elf.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/bpf_util.h
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/filesystems/binderfs/.gitignore [new file with mode: 0644]
tools/testing/selftests/filesystems/binderfs/Makefile [new file with mode: 0644]
tools/testing/selftests/filesystems/binderfs/binderfs_test.c [new file with mode: 0644]
tools/testing/selftests/filesystems/binderfs/config [new file with mode: 0644]
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/config
tools/testing/selftests/netfilter/nft_nat.sh [new file with mode: 0755]
virt/kvm/kvm_main.c

index 9b642669cb160c6e1535022c31628818faa37fdb..169fe08a649baff3bb1643c6c74150ea1ce87807 100644 (file)
@@ -24,7 +24,7 @@ What:         /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
                                                        cpld3_version
 
 Date:          November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact:       Vadim Pasternak <vadimp@mellanox.com>
 Description:   These files show with which CPLD versions have been burned
                on LED board.
@@ -35,7 +35,7 @@ What:         /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
                                                        jtag_enable
 
 Date:          November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact:       Vadim Pasternak <vadimp@mellanox.com>
 Description:   These files enable and disable the access to the JTAG domain.
                By default access to the JTAG domain is disabled.
@@ -105,7 +105,7 @@ What:               /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
                                                reset_voltmon_upgrade_fail
 
 Date:          November 2018
-KernelVersion: 4.21
+KernelVersion: 5.0
Contact:       Vadim Pasternak <vadimp@mellanox.com>
 Description:   These files show the system reset cause, as following: ComEx
                power fail, reset from ComEx, system platform reset, reset
index b799bcf67d7b5ae080694467442b113b49c92567..858b6c0b9a15ae71853645013c718dc0398804d4 100644 (file)
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
-               sm_off [Default Off]
-                       By default, scalable mode will be supported if the
+               sm_on [Default Off]
+                       By default, scalable mode will be disabled even if the
                        hardware advertises that it has support for the scalable
                        mode translation. With this option set, scalable mode
-                       will not be used even on hardware which claims to support
-                       it.
+                       will be used on hardware which claims to support it.
                tboot_noforce [Default Off]
                        Do not force the Intel IOMMU enabled under tboot.
                        By default, tboot will force Intel IOMMU on, which
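
[Illustration only, not taken from this patch: with the option renamed to sm_on, scalable mode would be opted into from the kernel command line along the lines of

	intel_iommu=on,sm_on

assuming the usual comma-separated syntax of the intel_iommu= parameter.]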
index 8c68de3cfd80ef991caad020c9b02d7b0499eb99..9919840d54cde80896e5f6f18bc03a0b9b8f8ffc 100644 (file)
@@ -2848,6 +2848,9 @@ F:        include/uapi/linux/if_bonding.h
 BPF (Safe dynamic programs and tools)
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
+R:     Martin KaFai Lau <kafai@fb.com>
+R:     Song Liu <songliubraving@fb.com>
+R:     Yonghong Song <yhs@fb.com>
 L:     netdev@vger.kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -2873,6 +2876,8 @@ F:        samples/bpf/
 F:     tools/bpf/
 F:     tools/lib/bpf/
 F:     tools/testing/selftests/bpf/
+K:     bpf
+N:     bpf
 
 BPF JIT for ARM
 M:     Shubham Bansal <illusionist.neo@gmail.com>
@@ -5181,7 +5186,7 @@ DRM DRIVERS FOR XEN
 M:     Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 L:     dri-devel@lists.freedesktop.org
-L:     xen-devel@lists.xen.org
+L:     xen-devel@lists.xenproject.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/gpu/drm/xen/
 F:     Documentation/gpu/xen-front.rst
@@ -11307,10 +11312,12 @@ F:    include/dt-bindings/
 
 OPENCORES I2C BUS DRIVER
 M:     Peter Korsgaard <peter@korsgaard.com>
+M:     Andrew Lunn <andrew@lunn.ch>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-ocores
 F:     drivers/i2c/busses/i2c-ocores.c
+F:     include/linux/platform_data/i2c-ocores.h
 
 OPENRISC ARCHITECTURE
 M:     Jonas Bonn <jonas@southpole.se>
@@ -12868,6 +12875,13 @@ F:     Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
 F:     drivers/net/dsa/realtek-smi*
 F:     drivers/net/dsa/rtl83*
 
+REDPINE WIRELESS DRIVER
+M:     Amitkumar Karwar <amitkarwar@gmail.com>
+M:     Siva Rebbagondla <siva8118@gmail.com>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wireless/rsi/
+
 REGISTER MAP ABSTRACTION
 M:     Mark Brown <broonie@kernel.org>
 L:     linux-kernel@vger.kernel.org
@@ -13696,6 +13710,15 @@ L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
 
+SFF/SFP/SFP+ MODULE SUPPORT
+M:     Russell King <linux@armlinux.org.uk>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/phy/phylink.c
+F:     drivers/net/phy/sfp*
+F:     include/linux/phylink.h
+F:     include/linux/sfp.h
+
 SGI GRU DRIVER
 M:     Dimitri Sivanich <sivanich@sgi.com>
 S:     Maintained
index 3142e67d03f1bc67d0e0f6ef480baab00a6c6612..86cf35d1d79d7fdcc33fbf503eca37b0e5ad57c9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
index d0fd68873689cfc8b37344a077ade06ae2d09851..5b250060f6ddc2366a556f9ac394b01cbfa43003 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
        bus-width = <0x4>;
-       cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
        cd-inverted;
        max-frequency = <26000000>;
        vmmc-supply = <&vmmcsd_fixed>;
index 47aa53ba6b92236d4616992aeea113a81bfb8c97..559659b399d04d6d9642d673d335772bc8274b5f 100644 (file)
                clocksource: timer@20000 {
                        compatible = "ti,da830-timer";
                        reg = <0x20000 0x1000>;
-                       interrupts = <12>, <13>;
+                       interrupts = <21>, <22>;
                        interrupt-names = "tint12", "tint34";
                        clocks = <&pll0_auxclk>;
                };
index 5edf858c8b860e840c8515d094a1b13fe8f7ef91..a31b17eaf51c2cfc7ff00420848af6506d74f52b 100644 (file)
                power {
                        label = "Power Button";
                        gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_POWER>;
                };
        };
index d8163705363ec3580c8068e66ad881f23efb5aa8..4a31a415f88e295fa37881b2991f4edaed04024f 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&reg_sd3_vmmc>;
        status = "okay";
 };
index 272ff6133ec1803b8f23d8eaa5d26f2d35d3d341..d1375d3650fdc38ca313977fe788bee4de1bbbb6 100644 (file)
                        };
 
                        gpt: gpt@2098000 {
-                               compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
+                               compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
                                reg = <0x02098000 0x4000>;
                                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SX_CLK_GPT_BUS>,
index e4645f61271244fedeb0d128a6ad4535b452b6b2..2ab74860d962212f6377ee50de34b67fd0a01fe8 100644 (file)
                        compatible = "amlogic,meson6-dwmac", "snps,dwmac";
                        reg = <0xc9410000 0x10000
                               0xc1108108 0x4>;
-                       interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
+                       interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "macirq";
                        status = "disabled";
                };
index 0872f6e3abf56f02c1f4d3f7423a3a6d17bda1a4..d50fc2f60fa31fc24e296de7f3ed2af3e4834321 100644 (file)
                cap-sd-highspeed;
                disable-wp;
 
-               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-               cd-inverted;
+               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
                vmmc-supply = <&vcc_3v3>;
        };
index 58669abda2594d4979086695db3af0aee095983b..0f0a46ddf3ff2e9d3fa8148803d01d5b28897e1b 100644 (file)
                /* Realtek RTL8211F (0x001cc916) */
                eth_phy: ethernet-phy@0 {
                        reg = <0>;
-                       eee-broken-1000t;
                        interrupt-parent = <&gpio_intc>;
                        /* GPIOH_3 */
                        interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
                cap-sd-highspeed;
                disable-wp;
 
-               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-               cd-inverted;
+               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
                vmmc-supply = <&tflash_vdd>;
                vqmmc-supply = <&tf_io>;
index f5853610b20b804170e941c5b24d2b50b94a1d62..6ac02beb5fa724c34706dcbf522312eedf09d618 100644 (file)
                cap-sd-highspeed;
                disable-wp;
 
-               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-               cd-inverted;
+               cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
                vmmc-supply = <&vcc_3v3>;
        };
index ddc7a7bb33c0de974a74ecf754f0a4644c5fc46b..f57acf8f66b95d1a2f1e17899e97b61a433c43da 100644 (file)
                        interrupts-extended = <
                                &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
                                &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
-                               &cpcap 48 1
+                               &cpcap 48 0
                        >;
                        interrupt-names =
                                "id_ground", "id_float", "se0conn", "vbusvld",
index e53d326913080f34e180996bfbed8e8a6320ad69..93b420934e8ee96115b41b80c9f78b842cae0d8b 100644 (file)
 
        vdda-supply = <&vdac>;
 
-       #address-cells = <1>;
-       #size-cells = <0>;
-
        port {
-               reg = <0>;
                venc_out: endpoint {
                        remote-endpoint = <&opa_in>;
                        ti,channels = <1>;
index 182a53991c901a387317db9710bceb9d0d1d5bb6..826920e6b8787ee10e06753795d7224f84e84158 100644 (file)
        /* For debugging, it is often good idea to remove this GPIO.
           It means you can remove back cover (to reboot by removing
           battery) and still use the MMC card. */
-       cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */
+       cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
 };
 
 /* most boards use vaux3, only some old versions use vmmc2 instead */
index 0d9b85317529b317ce9b8788c8cdbde14c7c53c7..e142e6c70a59fa7deb3ffee947b212ec50ba0fac 100644 (file)
                compatible = "ti,omap2-onenand";
                reg = <0 0 0x20000>;    /* CS0, offset 0, IO size 128K */
 
+               /*
+                * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+                * bootloader set values when booted with v4.19 using both N950
+                * and N9 devices (OneNAND Manufacturer: Samsung):
+                *
+                *   gpmc cs0 before gpmc_cs_program_settings:
+                *   cs0 GPMC_CS_CONFIG1: 0xfd001202
+                *   cs0 GPMC_CS_CONFIG2: 0x00181800
+                *   cs0 GPMC_CS_CONFIG3: 0x00030300
+                *   cs0 GPMC_CS_CONFIG4: 0x18001804
+                *   cs0 GPMC_CS_CONFIG5: 0x03171d1d
+                *   cs0 GPMC_CS_CONFIG6: 0x97080000
+                */
                gpmc,sync-read;
                gpmc,sync-write;
                gpmc,burst-length = <16>;
                gpmc,device-width = <2>;
                gpmc,mux-add-data = <2>;
                gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <87>;
-               gpmc,cs-wr-off-ns = <87>;
+               gpmc,cs-rd-off-ns = <122>;
+               gpmc,cs-wr-off-ns = <122>;
                gpmc,adv-on-ns = <0>;
-               gpmc,adv-rd-off-ns = <10>;
-               gpmc,adv-wr-off-ns = <10>;
-               gpmc,oe-on-ns = <15>;
-               gpmc,oe-off-ns = <87>;
+               gpmc,adv-rd-off-ns = <15>;
+               gpmc,adv-wr-off-ns = <15>;
+               gpmc,oe-on-ns = <20>;
+               gpmc,oe-off-ns = <122>;
                gpmc,we-on-ns = <0>;
-               gpmc,we-off-ns = <87>;
-               gpmc,rd-cycle-ns = <112>;
-               gpmc,wr-cycle-ns = <112>;
-               gpmc,access-ns = <81>;
+               gpmc,we-off-ns = <122>;
+               gpmc,rd-cycle-ns = <148>;
+               gpmc,wr-cycle-ns = <148>;
+               gpmc,access-ns = <117>;
                gpmc,page-burst-access-ns = <15>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,wait-monitoring-ns = <0>;
-               gpmc,clk-activation-ns = <5>;
-               gpmc,wr-data-mux-bus-ns = <30>;
-               gpmc,wr-access-ns = <81>;
-               gpmc,sync-clk-ps = <15000>;
+               gpmc,clk-activation-ns = <10>;
+               gpmc,wr-data-mux-bus-ns = <40>;
+               gpmc,wr-access-ns = <117>;
+
+               gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
 
                /*
                 * MTD partition table corresponding to Nokia's MeeGo 1.2
index 9c7e309d9c2cc20e8db86a1ef51bdf20c18995d2..0960348002ad85fca5326dc55caab39b7bcaeffa 100644 (file)
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        ti,syss-mask = <1>;
-                       ti,no-reset-on-init;
-                       ti,no-idle-on-init;
                        /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */
                        clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>;
                        clock-names = "fck";
index 3cc33f7ff7febae7e8eebb586f180d0c07c02b3c..3adc158a40bb34f0d8954163595333293ce21653 100644 (file)
 
                du: display@feb00000 {
                        compatible = "renesas,du-r8a7743";
-                       reg = <0 0xfeb00000 0 0x40000>,
-                             <0 0xfeb90000 0 0x1c>;
-                       reg-names = "du", "lvds.0";
+                       reg = <0 0xfeb00000 0 0x40000>;
                        interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&cpg CPG_MOD 724>,
-                                <&cpg CPG_MOD 723>,
-                                <&cpg CPG_MOD 726>;
-                       clock-names = "du.0", "du.1", "lvds.0";
+                                <&cpg CPG_MOD 723>;
+                       clock-names = "du.0", "du.1";
                        status = "disabled";
 
                        ports {
                                port@1 {
                                        reg = <1>;
                                        du_out_lvds0: endpoint {
+                                               remote-endpoint = <&lvds0_in>;
+                                       };
+                               };
+                       };
+               };
+
+               lvds0: lvds@feb90000 {
+                       compatible = "renesas,r8a7743-lvds";
+                       reg = <0 0xfeb90000 0 0x1c>;
+                       clocks = <&cpg CPG_MOD 726>;
+                       power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+                       resets = <&cpg 726>;
+                       status = "disabled";
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       lvds0_in: endpoint {
+                                               remote-endpoint = <&du_out_lvds0>;
+                                       };
+                               };
+                               port@1 {
+                                       reg = <1>;
+                                       lvds0_out: endpoint {
                                        };
                                };
                        };
index 353d90f99b406e9e267e56bd84e082968ae433e2..13304b8c51390de7c31f67b1850ed5aca931a29f 100644 (file)
                        #clock-cells = <0>;
                        compatible = "fixed-clock";
                        clock-frequency = <24000000>;
+                       clock-output-names = "osc24M";
                };
 
                osc32k: clk-32k {
index 5d23667dc2d2e72f3fc557c0fa387c69b45ac3a2..25540b7694d590dea1c3c54c5453d4a7217d9f37 100644 (file)
@@ -53,7 +53,7 @@
 
        aliases {
                serial0 = &uart0;
-               /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+               ethernet0 = &emac;
                ethernet1 = &sdiowifi;
        };
 
index 689c8930dce35ceb9b54f94d796ce3283e4be482..b08d561d6748ed1b1d4cfb99890748834672198c 100644 (file)
        bus-num = <3>;
        status = "okay";
        spi-slave;
+       #address-cells = <0>;
 
-       slave@0 {
+       slave {
                compatible = "lwn,bk4";
                spi-max-frequency = <30000000>;
-               reg = <0>;
        };
 };
 
index 3b73813c6b0434f93c85bf4256ae70049de8ae2c..23e8c93515d4ec5d0648a5be2f1a900e0f97ca8d 100644 (file)
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
 /*
  * N2100 PCI.
  */
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        int irq;
 
index 028e50c6383fa4b1a15b36e23f961161ffb76dbe..a32c3b631484a9f2751fe1750a1750284aa419d4 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/suspend.h>
 #include <asm/suspend.h>
 #include "smc.h"
+#include "pm.h"
 
 static int tango_pm_powerdown(unsigned long arg)
 {
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
        .valid = suspend_valid_only_mem,
 };
 
-static int __init tango_pm_init(void)
+void __init tango_pm_init(void)
 {
        suspend_set_ops(&tango_pm_ops);
-       return 0;
 }
-
-late_initcall(tango_pm_init);
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
new file mode 100644 (file)
index 0000000..35ea705
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SUSPEND
+void __init tango_pm_init(void);
+#else
+#define tango_pm_init NULL
+#endif
index 677dd7b5efd9007412a5e7047573f02594337ef4..824f90737b044145378137cbecd795168cc09a4f 100644 (file)
@@ -2,6 +2,7 @@
 #include <asm/mach/arch.h>
 #include <asm/hardware/cache-l2x0.h>
 #include "smc.h"
+#include "pm.h"
 
 static void tango_l2c_write(unsigned long val, unsigned int reg)
 {
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
        .dt_compat      = tango_dt_compat,
        .l2c_aux_mask   = ~0,
        .l2c_write_sec  = tango_l2c_write,
+       .init_late      = tango_pm_init,
 MACHINE_END
index ed36dcab80f1e7fb1a89cb41bc05ed71a1afd66f..f519199741837664df922fc60e31c42de8eab145 100644 (file)
@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
        if (ssp == NULL)
                return -ENODEV;
 
-       iounmap(ssp->mmio_base);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
        list_del(&ssp->node);
        mutex_unlock(&ssp_lock);
 
-       kfree(ssp);
        return 0;
 }
 
index cb44aa290e73c5290586f737fe5ffae2f108bae6..e1d44b903dfc3fd7f9d252ba266cc1e64ab20c61 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 
index b0c64f75792c163bd7d0000ea023541079fbd1be..8974b5a1d3b1ecb2da4c22a0f8c780d6794b33a6 100644 (file)
                reg = <0x3a3>;
                interrupt-parent = <&r_intc>;
                interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+               x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
        };
 };
 
index 837a03dee875c45c3c73663b35bbc78bb0f677cb..2abb335145a68a49d102e9e1e668246e6a21957d 100644 (file)
                };
 
                video-codec@1c0e000 {
-                       compatible = "allwinner,sun50i-h5-video-engine";
+                       compatible = "allwinner,sun50i-a64-video-engine";
                        reg = <0x01c0e000 0x1000>;
                        clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>,
                                 <&ccu CLK_DRAM_VE>;
index e14e0ce7e89fed32970003859a30841915fcc9fc..016641a41694a4d0500a8db4f6340f19c4252be1 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_boot>;
index 8cd50b75171de6f2d64f9732d36e4b280510b48f..ade2ee09ae9624cb1b89fcc5588649b4233dbc7d 100644 (file)
        max-frequency = <200000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddio_ao3v3>;
        vqmmc-supply = <&vddio_tf>;
index 4cf7f6e80c6a00e4d2838f1552b976d96cf0860b..25105ac96d5596cf2a0abf412cccfa2ebfd7d502 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_card>;
index 2e1cd5e3a246c48ef27c391497bb30e315f1539d..1cc9dc68ef00be8b30bd45754be1e79123c09963 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&tflash_vdd>;
        vqmmc-supply = <&tf_io>;
index ce862266b9aac89f2895406ad1f3c5ed3e8a4813..0be0f2a5d2fe918e2f1a43b798f2ee4899719bb9 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_card>;
index 93a4acf2c46c50d9fc5110f0cff639308883a536..ad4d50bd9d7756043757139faa0a74dc9a598c52 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vcc_3v3>;
 };
index ec09bb5792b71078b72d95ac9650a73c2d5f29c4..2d2db783c44c149d7c5bf6b4824da82f885e7261 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vcc_3v3>;
index f1c410e2da2b6f1e71e3ccafdab23548172d1e8e..796baea7a0bfb7f89b083d19bf034214cb7e7ca2 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_card>;
index db293440e4caedea4e86d5ffc842ada6e42c4050..255cede7b4476ee7ae31007bb5f8fb139fcff05d 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vcc_3v3>;
        vqmmc-supply = <&vcc_card>;
index 6739697be1defd72693e1e867c172a85cfe643ba..9cbdb85fb591735f8c890a9c43d5a5b4e3911205 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_card>;
index a1b31013ab6e3494d810619fadf81752a67b94f4..bc811a2faf4207dd42da6586cff3e013499f3b5e 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_boot>;
index 3c3a667a8df8bf838e5f6b1f28783e275e5917d7..3f086ed7de05525c3bcf11866133c085fb3e7bd4 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_boot>;
index f7a1cffab4a886e48dd35515213a0575419481c6..8acfd40090d2e0b558f5e8bc2f6201ff5dad5560 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_boot>;
index 7212dc4531e49ea5db17be7ca4e8252123472544..7fa20a8ede171e745610a3ad21de2b3ff07e24e8 100644 (file)
        max-frequency = <100000000>;
        disable-wp;
 
-       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
-       cd-inverted;
+       cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
 
        vmmc-supply = <&vddao_3v3>;
        vqmmc-supply = <&vddio_boot>;
index 99b7495455a621742b49523c304b07f8cbf27ca2..838e32cc14c9273db473aa5bcce1f54aeaab676c 100644 (file)
                };
 
                intc: interrupt-controller@9bc0000 {
-                       compatible = "arm,gic-v3";
+                       compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
                        #interrupt-cells = <3>;
                        interrupt-controller;
                        #redistributor-regions = <1>;
index 20745a8528c50c44f35e68d5207121bc9e740c01..719ed9d9067d767e820c778968b9a15b60bce5ab 100644 (file)
                                 <&cpg CPG_CORE R8A774A1_CLK_S3D1>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
+                       dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+                              <&dmac2 0x13>, <&dmac2 0x12>;
+                       dma-names = "tx", "rx", "tx", "rx";
                        power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
                        resets = <&cpg 310>;
                        status = "disabled";
index afedbf5728ec54b5ef9270b2cc78d424f4e08bb6..0648d12778edc09ffe038650b9414cf012b8fe40 100644 (file)
                                 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
+                       dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+                              <&dmac2 0x13>, <&dmac2 0x12>;
+                       dma-names = "tx", "rx", "tx", "rx";
                        power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
                        resets = <&cpg 310>;
                        status = "disabled";
index 6dc9b1fef83037749f52ba9449691256caff35ba..4b3730f640efa78b6cec3db68973585d6b14ca8e 100644 (file)
                                 <&cpg CPG_CORE R8A77965_CLK_S3D1>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
+                       dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+                              <&dmac2 0x13>, <&dmac2 0x12>;
+                       dma-names = "tx", "rx", "tx", "rx";
                        power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
                        resets = <&cpg 310>;
                        status = "disabled";
index f2c211a6229baa3928ba4a652e9c4a947fe27dae..58871333737a4e5458d723dcb0fd6e5e609548da 100644 (file)
@@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image,
 {
        void *buf;
        size_t buf_size;
+       size_t cmdline_len;
        int ret;
 
+       cmdline_len = cmdline ? strlen(cmdline) : 0;
        buf_size = fdt_totalsize(initial_boot_params)
-                       + strlen(cmdline) + DTB_EXTRA_SPACE;
+                       + cmdline_len + DTB_EXTRA_SPACE;
 
        for (;;) {
                buf = vmalloc(buf_size);
index fcb1f2a6d7c66d779a752a4d926123e7f0822b34..99bb8facb5cbc34e22e2df37fc6267a0e3d2f3f5 100644 (file)
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
+                    unsigned long end)
 {
-       pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
-       unsigned long addr;
-       unsigned i;
+       unsigned long addr = start;
+       pte_t *ptep = pte_offset_kernel(pmdp, start);
 
-       for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
-               addr = start + i * PAGE_SIZE;
+       do {
                note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
-       }
+       } while (ptep++, addr += PAGE_SIZE, addr != end);
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
+                    unsigned long end)
 {
-       pmd_t *pmdp = pmd_offset(pudp, 0UL);
-       unsigned long addr;
-       unsigned i;
+       unsigned long next, addr = start;
+       pmd_t *pmdp = pmd_offset(pudp, start);
 
-       for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+       do {
                pmd_t pmd = READ_ONCE(*pmdp);
+               next = pmd_addr_end(addr, end);
 
-               addr = start + i * PMD_SIZE;
                if (pmd_none(pmd) || pmd_sect(pmd)) {
                        note_page(st, addr, 3, pmd_val(pmd));
                } else {
                        BUG_ON(pmd_bad(pmd));
-                       walk_pte(st, pmdp, addr);
+                       walk_pte(st, pmdp, addr, next);
                }
-       }
+       } while (pmdp++, addr = next, addr != end);
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
+                    unsigned long end)
 {
-       pud_t *pudp = pud_offset(pgdp, 0UL);
-       unsigned long addr;
-       unsigned i;
+       unsigned long next, addr = start;
+       pud_t *pudp = pud_offset(pgdp, start);
 
-       for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+       do {
                pud_t pud = READ_ONCE(*pudp);
+               next = pud_addr_end(addr, end);
 
-               addr = start + i * PUD_SIZE;
                if (pud_none(pud) || pud_sect(pud)) {
                        note_page(st, addr, 2, pud_val(pud));
                } else {
                        BUG_ON(pud_bad(pud));
-                       walk_pmd(st, pudp, addr);
+                       walk_pmd(st, pudp, addr, next);
                }
-       }
+       } while (pudp++, addr = next, addr != end);
 }
 
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                     unsigned long start)
 {
-       pgd_t *pgdp = pgd_offset(mm, 0UL);
-       unsigned i;
-       unsigned long addr;
+       unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
+       unsigned long next, addr = start;
+       pgd_t *pgdp = pgd_offset(mm, start);
 
-       for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+       do {
                pgd_t pgd = READ_ONCE(*pgdp);
+               next = pgd_addr_end(addr, end);
 
-               addr = start + i * PGDIR_SIZE;
                if (pgd_none(pgd)) {
                        note_page(st, addr, 1, pgd_val(pgd));
                } else {
                        BUG_ON(pgd_bad(pgd));
-                       walk_pud(st, pgdp, addr);
+                       walk_pud(st, pgdp, addr, next);
                }
-       }
+       } while (pgdp++, addr = next, addr != end);
 }
 
 void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
index 38049357d6d3279ff7b95ff801a01e2cb88d9a4b..40712e49381b216dd0ae4af9438e7023bcbeebe3 100644 (file)
@@ -155,18 +155,22 @@ out:
 static int __init nfhd_init(void)
 {
        u32 blocks, bsize;
+       int ret;
        int i;
 
        nfhd_id = nf_get_id("XHDI");
        if (!nfhd_id)
                return -ENODEV;
 
-       major_num = register_blkdev(major_num, "nfhd");
-       if (major_num <= 0) {
+       ret = register_blkdev(major_num, "nfhd");
+       if (ret < 0) {
                pr_warn("nfhd: unable to get major number\n");
-               return major_num;
+               return ret;
        }
 
+       if (!major_num)
+               major_num = ret;
+
        for (i = NFHD_DEV_OFFSET; i < 24; i++) {
                if (nfhd_get_capacity(i, 0, &blocks, &bsize))
                        continue;
index 0d14f51d0002bf5fdeafd47df2d7641441ceb30a..a84c24d894aa4341dca03a1320927100ceb4cb96 100644 (file)
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT
          please say 'N' here. If you want a high-performance kernel to run on
          new Loongson 3 machines only, please say 'Y' here.
 
+config CPU_LOONGSON3_WORKAROUNDS
+       bool "Old Loongson 3 LLSC Workarounds"
+       default y if SMP
+       depends on CPU_LOONGSON3
+       help
+         Loongson 3 processors have the llsc issues which require workarounds.
+         Without workarounds the system may hang unexpectedly.
+
+         Newer Loongson 3 will fix these issues and no workarounds are needed.
+         The workarounds have no significant side effect on them but may
+         decrease the performance of the system so this option should be
+         disabled unless the kernel is intended to be run on old systems.
+
+         If unsure, please say Y.
+
 config CPU_LOONGSON2E
        bool "Loongson 2E"
        depends on SYS_HAS_CPU_LOONGSON2E
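
[Illustration only, not part of the patch: since this is a bool symbol that defaults to y on SMP, an affected Loongson 3 kernel configuration would simply carry

	CONFIG_CPU_LOONGSON3_WORKAROUNDS=y
]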
index 50cff3cbcc6de6386d11b07cdbe5a10b60ef976f..4f7b1fa31cf53f04517e51a72a9e522487454550 100644 (file)
@@ -76,7 +76,7 @@
        status = "okay";
 
        pinctrl-names = "default";
-       pinctrl-0 = <&pins_uart2>;
+       pinctrl-0 = <&pins_uart3>;
 };
 
 &uart4 {
                bias-disable;
        };
 
-       pins_uart2: uart2 {
-               function = "uart2";
-               groups = "uart2-data", "uart2-hwflow";
+       pins_uart3: uart3 {
+               function = "uart3";
+               groups = "uart3-data", "uart3-hwflow";
                bias-disable;
        };
 
index 6fb16fd240353aa733f2fec5178db98541184bcc..2beb78a62b7dc7e2f6f99c7afc82c788cc8b626e 100644 (file)
                #dma-cells = <2>;
 
                interrupt-parent = <&intc>;
-               interrupts = <29>;
+               interrupts = <20>;
 
                clocks = <&cgu JZ4740_CLK_DMA>;
 
index 2152b7ba65fbcdaefc97c8f9fd13cd50568ca15b..cc8dbea0911fcf5a61761a38997ebe1e9f690074 100644 (file)
                interrupts = <0>;
        };
 
-       axi_i2c: i2c@10A00000 {
+       axi_i2c: i2c@10a00000 {
            compatible = "xlnx,xps-iic-2.00.a";
            interrupt-parent = <&axi_intc>;
            interrupts = <4>;
-           reg = < 0x10A00000 0x10000 >;
+           reg = < 0x10a00000 0x10000 >;
            clocks = <&ext>;
            xlnx,clk-freq = <0x5f5e100>;
            xlnx,family = "Artix7";
            #address-cells = <1>;
            #size-cells = <0>;
 
-           ad7420@4B {
+           ad7420@4b {
                compatible = "adi,adt7420";
-               reg = <0x4B>;
+               reg = <0x4b>;
            };
        } ;
 };
index 43fcd35e295759d7368268d48bfa6534b074f117..94096299fc569280a6b013a011d22de8c2be6fdd 100644 (file)
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                             \
        if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)             \
        if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)            \
        if (kernel_uses_llsc) {                                               \
                int temp;                                                     \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)                      \
        if (kernel_uses_llsc) {                                               \
                long temp;                                                    \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
        if (kernel_uses_llsc) {                                               \
                long temp;                                                    \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
        if (kernel_uses_llsc) {                                               \
                long temp;                                                    \
                                                                              \
+               loongson_llsc_mb();                                           \
                __asm__ __volatile__(                                         \
                "       .set    push                                    \n"   \
                "       .set    "MIPS_ISA_LEVEL"                        \n"   \
index a5eb1bb199a7fdf76087bcea1d63b962e2555606..b7f6ac5e513c9a47e0943c768666f2ccab2144e0 100644 (file)
 #define __smp_mb__before_atomic()      __smp_mb__before_llsc()
 #define __smp_mb__after_atomic()       smp_llsc_mb()
 
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ *    be executed after the ll - this is the reordering case.
+ *
+ *    In order to avoid this we need to place a memory barrier (ie. a sync
+ *    instruction) prior to every ll instruction, in between it & any earlier
+ *    memory access instructions. Many of these cases are already covered by
+ *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+ *    which multiple CPUs may operate on a memory location but ordering is not
+ *    usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ *    or similar, then misprediction of the branch may allow speculative
+ *    execution of memory accesses from outside of the ll-sc loop.
+ *
+ *    In order to avoid this we need a memory barrier (ie. a sync instruction)
+ *    at each affected branch target, for which we also use loongson_llsc_mb()
+ *    defined below.
+ *
+ *    This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb()     __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb()     do { } while (0)
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
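
[A rough sketch of the barrier placement the comment above describes, modelled on the bitops/atomic hunks in this series; illustration only, not part of the merge. It assumes a MIPS kernel build (pre-R6 ISA) where loongson_llsc_mb() as defined above is visible, and uses bare 32-bit ll/sc rather than the kernel's __LL/__SC wrappers:

	/* Hypothetical example, not from the patch: atomically OR 'mask' into *p. */
	static inline void example_atomic_or(unsigned int mask, volatile unsigned int *p)
	{
		unsigned int tmp;

		loongson_llsc_mb();		/* case 1: sync before the ll */
		do {
			__asm__ __volatile__(
			"	ll	%0, %1		\n"
			"	or	%0, %0, %2	\n"
			"	sc	%0, %1		\n"
			: "=&r" (tmp), "+m" (*p)
			: "r" (mask));
		} while (!tmp);
		/*
		 * case 2: a conditional branch that can leave an ll/sc loop (e.g. the
		 * value-mismatch exit in a cmpxchg) needs loongson_llsc_mb() at its
		 * target as well, as the futex.h hunk below does after its asm block.
		 */
	}
]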
index c4675957b21bca1e5be2cddd37f8af71a24eb6b9..830c93a010c34926b26d3dea33a9f6ac8049ee59 100644 (file)
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+               loongson_llsc_mb();
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # set_bit       \n"
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
+               loongson_llsc_mb();
                do {
                        __asm__ __volatile__(
                        "       .set    push                            \n"
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                : "ir" (~(1UL << bit)));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+               loongson_llsc_mb();
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # clear_bit     \n"
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                } while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
+               loongson_llsc_mb();
                do {
                        __asm__ __volatile__(
                        "       .set    push                            \n"
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
 
+               loongson_llsc_mb();
                do {
                        __asm__ __volatile__(
                        "       .set    push                            \n"
index c14d798f38886c15b0f8d8adfbcf7008ad863d74..b83b0397462d9e74cff12be9ee3897b9cf1e29d8 100644 (file)
@@ -50,6 +50,7 @@
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else if (cpu_has_llsc) {                                      \
+               loongson_llsc_mb();                                     \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                  "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
+               loongson_llsc_mb();
                __asm__ __volatile__(
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
+               loongson_llsc_mb();
        } else
                return -ENOSYS;
 
index 57933fc8fd9877d09b4f885a6cb4b4412978e4c2..910851c62db3d3f457d3781175a83a444d3e0b76 100644 (file)
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                        : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
                        : [global] "r" (page_global));
                } else if (kernel_uses_llsc) {
+                       loongson_llsc_mb();
                        __asm__ __volatile__ (
                        "       .set    push                            \n"
                        "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                        "       .set    pop                             \n"
                        : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
                        : [global] "r" (page_global));
+                       loongson_llsc_mb();
                }
 #else /* !CONFIG_SMP */
                if (pte_none(*buddy))
index 8f5bd04f320a90be3861dd75664fdead350655a3..7f3f136572decc82d666d0fca0515f38d15dd2b3 100644 (file)
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
        }
 
        /* reprime cause register */
-       write_gcr_error_cause(0);
+       write_gcr_error_cause(cm_error);
 }
index 6829a064aac8f6119da343748f6ccc2e35bde751..339870ed92f79c99e9a0678f9493bbfcaab5787b 100644 (file)
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
 static int get_frame_info(struct mips_frame_info *info)
 {
        bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
-       union mips_instruction insn, *ip, *ip_end;
+       union mips_instruction insn, *ip;
        const unsigned int max_insns = 128;
        unsigned int last_insn_size = 0;
        unsigned int i;
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
        if (!ip)
                goto err;
 
-       ip_end = (void *)ip + info->func_size;
-
-       for (i = 0; i < max_insns && ip < ip_end; i++) {
+       for (i = 0; i < max_insns; i++) {
                ip = (void *)ip + last_insn_size;
+
                if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
                        insn.word = ip->halfword[0] << 16;
                        last_insn_size = 2;
index 0fce4608aa88665febfcad2964690beff5000a0d..c1a4d4dc46655fe914c1927cb8088969e5909318 100644 (file)
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
 endif
 
 cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
+
+#
+# Some versions of binutils, not currently mainline as of 2019/02/04, support
+# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
+# description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour. If upstream
+# binutils does not merge support for the flag then we can revisit & remove
+# this later - for now it ensures vendor toolchains don't cause problems.
+#
+cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
 #
 # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
 # as MIPS64 R2; older versions as just R1.  This leaves the possibility open
index a60715e11306b272bc0402a8f8351a2afec5fc48..b26892ce871c87cdf1f8c4e39d5ca81b11bfff31 100644 (file)
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
 {
 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
        mach_prepare_shutdown();
-       unreachable();
+
+       /*
+        * A wait loop would be needed here, but mips/kernel/reset.c already
+        * calls a generic delay loop, machine_hang(), so simply return.
+        */
+       return;
 #else
        void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
 
index 37b1cb246332298bd612cd841352093e6323fa49..65b6e85447b1ebc82364c879da5e8561482a0f08 100644 (file)
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 * to mimic that here by taking a load/istream page
                 * fault.
                 */
+               if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+                       uasm_i_sync(p, 0);
                UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
                uasm_i_jr(p, ptr);
 
@@ -1646,6 +1648,8 @@ static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
+       if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+               uasm_i_sync(p, 0);
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (cpu_has_64bits)
                uasm_i_lld(p, pte, 0, ptr);
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void)
 #endif
 
        uasm_l_nopage_tlbl(&l, p);
+       if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+               uasm_i_sync(&p, 0);
        build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_0 & 1) {
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void)
 #endif
 
        uasm_l_nopage_tlbs(&l, p);
+       if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+               uasm_i_sync(&p, 0);
        build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_1 & 1) {
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void)
 #endif
 
        uasm_l_nopage_tlbm(&l, p);
+       if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+               uasm_i_sync(&p, 0);
        build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
        if ((unsigned long)tlb_do_page_fault_1 & 1) {
index 5017d5843c5ac4913aa254b2157bd66d86b91bcd..fc29b85cfa926d1b70e69901b2e2c3abc7e46fcc 100644 (file)
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
        if (octeon_has_feature(OCTEON_FEATURE_PCIE))
                return 0;
 
+       if (!octeon_is_pci_host()) {
+               pr_notice("Not in host mode, PCI Controller not initialized\n");
+               return 0;
+       }
+
        /* Point pcibios_map_irq() to the PCI version of it */
        octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
 
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
        else
                octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-       if (!octeon_is_pci_host()) {
-               pr_notice("Not in host mode, PCI Controller not initialized\n");
-               return 0;
-       }
-
        /* PCI I/O and PCI MEM values */
        set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
        ioport_resource.start = 0;
index f6fd340e39c2f4bae451f0ecdab0bd929a041eef..0ede4deb8181ce20300f6bc12cfffaf683b99684 100644 (file)
@@ -8,6 +8,7 @@ ccflags-vdso := \
        $(filter -E%,$(KBUILD_CFLAGS)) \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
        $(filter -march=%,$(KBUILD_CFLAGS)) \
+       $(filter -m%-float,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
 ifdef CONFIG_CC_IS_CLANG
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
        $(call cmd,force_checksrc)
        $(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
        $(call if_changed_dep,cpp_lds_S)
 
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
        $(call cmd,force_checksrc)
        $(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
        $(call if_changed_dep,cpp_lds_S)
 
index 2e6ada28da645fd6a122d153fedfdc2c62b7843e..c9bfe526ca9df86c0f5b2aab8cbf063cd32ed81b 100644 (file)
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-                                        struct spinlock *old_pmd_ptl,
-                                        struct vm_area_struct *vma)
-{
-       if (radix_enabled())
-               return false;
-       /*
-        * Archs like ppc64 use pgtable to store per pmd
-        * specific information. So when we switch the pmd,
-        * we should also withdraw and deposit the pgtable
-        */
-       return true;
-}
-
-
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+                                 struct spinlock *old_pmd_ptl,
+                                 struct vm_area_struct *vma);
+/*
+ * Hash translation mode use the deposited table to store hash pte
+ * slot information.
+ */
 #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
 static inline bool arch_needs_pgtable_deposit(void)
 {
index f3c31f5e1026fca9f2e01da261e1319dccbceaf0..ecd31569a120a716d30d1f030b6732e0298a172b 100644 (file)
@@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m)
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
 }
 #endif /* CONFIG_PROC_FS */
+
+/*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
+ * location. Hence a pmd move requires deposit and withdraw.
+ *
+ * For radix translation with split pmd ptl, we store the deposited table in the
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
+ * move.
+ *
+ * With hash we use deposited table always irrespective of anon or not.
+ * With radix we use deposited table only for anonymous mapping.
+ */
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+                          struct spinlock *old_pmd_ptl,
+                          struct vm_area_struct *vma)
+{
+       if (radix_enabled())
+               return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+
+       return true;
+}
index 7d6457ab5d3450f0db4d6cc25e2067c80db59f13..bba281b1fe1b0730f8a0d31fc469f324118a8933 100644 (file)
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
 {
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;
+       uint64_t saved = 0;
 
        /*
         * When the hypervisor cannot map all the requested memory in a single
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
                token = ret[0];
+               if (!saved)
+                       saved = ret[1];
                cond_resched();
        } while (rc == H_BUSY);
 
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
                return -ENXIO;
        }
 
-       p->bound_addr = ret[1];
+       p->bound_addr = saved;
 
        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
 
index f105ae8651c9425429982fccabd47a8114edd5ff..f62e347862ccc61ba417d80dabee304ef28b6ec7 100644 (file)
@@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src)
 3:
        /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
        pushl   %ecx
+       pushl   %edx
        movl    $MSR_EFER, %ecx
        rdmsr
        btsl    $_EFER_LME, %eax
        wrmsr
+       popl    %edx
        popl    %ecx
 
        /* Enable PAE and LA57 (if required) paging modes */
index 40e12cfc87f62eb5b68490053c0861ac05af18e6..daafb893449b6e1fb11c7f6b18e71d90da553909 100644 (file)
@@ -3558,6 +3558,14 @@ static void free_excl_cntrs(int cpu)
 }
 
 static void intel_pmu_cpu_dying(int cpu)
+{
+       fini_debug_store_on_cpu(cpu);
+
+       if (x86_pmu.counter_freezing)
+               disable_counter_freeze();
+}
+
+static void intel_pmu_cpu_dead(int cpu)
 {
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        struct intel_shared_regs *pc;
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu)
        }
 
        free_excl_cntrs(cpu);
-
-       fini_debug_store_on_cpu(cpu);
-
-       if (x86_pmu.counter_freezing)
-               disable_counter_freeze();
 }
 
 static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
+       .cpu_dead               = intel_pmu_cpu_dead,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = {
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
+       .cpu_dead               = intel_pmu_cpu_dead,
+
        .guest_get_msrs         = intel_guest_get_msrs,
        .sched_task             = intel_pmu_sched_task,
 };
index c07bee31abe859c61c53c499e9aabcbe11f1f07b..b10e04387f380342c9b9427012b09b9534a347d1 100644 (file)
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
        .id_table       = snbep_uncore_pci_ids,
 };
 
+#define NODE_ID_MASK   0x7
+
 /*
  * build pci bus to socket mapping
  */
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
                err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
                if (err)
                        break;
-               nodeid = config;
+               nodeid = config & NODE_ID_MASK;
                /* get the Node ID mapping */
                err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
                if (err)
index 40616e805292421b8277b5d2abca04ff3279e0e4..2779ace16d23f21d5cb7b65faf87f384b3b05268 100644 (file)
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
 {
-       native_set_pmd(pmdp, pmd);
+       set_pmd(pmdp, pmd);
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
index 672c7225cb1b89426b682f40b4a4b33a73803d89..6ce290c506d93e83318f3b13b95d8f5861e02983 100644 (file)
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                        quirk_no_way_out(i, m, regs);
 
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       m->bank = i;
                        mce_read_aux(m, i);
                        *msg = tmp;
                        return 1;
index 8ff20523661b87c5e68ee263cef47debb53f1d1f..d8ea4ebd79e787050e0adfd861bb3df56aa3ec66 100644 (file)
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
+       hrtimer_cancel(&vmx->nested.preemption_timer);
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
        free_vpid(vmx->nested.vpid02);
index 3d27206f6c010b13d408d45a4eb647430bf2e50d..e67ecf25e69087ff6aa8711001a59b175477a3b1 100644 (file)
@@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
+       /*
+        * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+        * is returned, but our callers are not ready for that and they blindly
+        * call kvm_inject_page_fault.  Ensure that they at least do not leak
+        * uninitialized kernel stack memory into cr2 and error code.
+        */
+       memset(exception, 0, sizeof(*exception));
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
                                          exception);
 }
index 4f8972311a77ef931226996d4c0d86fb7327cd2f..14e6119838a6480d28de8c94f0a4c89273f51e7a 100644 (file)
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
 
 #endif
 
+/*
+ * See set_mce_nospec().
+ *
+ * Machine check recovery code needs to change cache mode of poisoned pages to
+ * UC to avoid speculative access logging another error. But passing the
+ * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
+ * speculative access. So we cheat and flip the top bit of the address. This
+ * works fine for the code that updates the page tables. But at the end of the
+ * process we need to flush the TLB and cache and the non-canonical address
+ * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long fix_addr(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+       return (long)(addr << 1) >> 1;
+#else
+       return addr;
+#endif
+}
+
 static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
 {
        if (cpa->flags & CPA_PAGES_ARRAY) {
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data)
        unsigned int i;
 
        for (i = 0; i < cpa->numpages; i++)
-               __flush_tlb_one_kernel(__cpa_addr(cpa, i));
+               __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
 }
 
 static void cpa_flush(struct cpa_data *data, int cache)
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-                       clflush_cache_range_opt((void *)addr, PAGE_SIZE);
+                       clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
        }
        mb();
 }
@@ -1627,29 +1650,6 @@ out:
        return ret;
 }
 
-/*
- * Machine check recovery code needs to change cache mode of poisoned
- * pages to UC to avoid speculative access logging another error. But
- * passing the address of the 1:1 mapping to set_memory_uc() is a fine
- * way to encourage a speculative access. So we cheat and flip the top
- * bit of the address. This works fine for the code that updates the
- * page tables. But at the end of the process we need to flush the cache
- * and the non-canonical address causes a #GP fault when used by the
- * CLFLUSH instruction.
- *
- * But in the common case we already have a canonical address. This code
- * will fix the top bit if needed and is a no-op otherwise.
- */
-static inline unsigned long make_addr_canonical_again(unsigned long addr)
-{
-#ifdef CONFIG_X86_64
-       return (long)(addr << 1) >> 1;
-#else
-       return addr;
-#endif
-}
-
-
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
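
The fix_addr() helper introduced above undoes the top-bit flip by shifting the address
left one bit and arithmetically shifting it back, which sign-extends from bit 62. A small
stand-alone demonstration of that arithmetic (hypothetical addresses, plain user-space C
rather than the kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t fix_addr_demo(uint64_t addr)
{
	/* same trick as fix_addr(): drop bit 63, then sign-extend from bit 62 */
	return (uint64_t)(((int64_t)(addr << 1)) >> 1);
}

int main(void)
{
	uint64_t canonical = 0xffff888012345000ULL;	/* example kernel address */
	uint64_t decoy = canonical & ~(1ULL << 63);	/* top bit cleared, non-canonical */

	printf("%llx -> %llx\n", (unsigned long long)decoy,
	       (unsigned long long)fix_addr_demo(decoy));	/* restored */
	printf("%llx -> %llx\n", (unsigned long long)canonical,
	       (unsigned long long)fix_addr_demo(canonical));	/* unchanged */
	return 0;
}

With the helper applied at the flush sites, both INVLPG (via __flush_tlb_one_kernel) and
CLFLUSH always see a canonical address.
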
index fc714ef402a6a5d61840659e8f3e32c2301ae172..2620baa1f6993db5e6706a36240ab4c6a2931038 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/sched/loadavg.h>
 #include <linux/sched/signal.h>
 #include <trace/events/block.h>
+#include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
 
@@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
        u64 now = ktime_to_ns(ktime_get());
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;
+       int inflight = 0;
 
        blkg = bio->bi_blkg;
        if (!blkg || !bio_flagged(bio, BIO_TRACKED))
@@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
                return;
 
        enabled = blk_iolatency_enabled(iolat->blkiolat);
+       if (!enabled)
+               return;
+
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
@@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
                }
                rqw = &iolat->rq_wait;
 
-               atomic_dec(&rqw->inflight);
-               if (!enabled || iolat->min_lat_nsec == 0)
+               inflight = atomic_dec_return(&rqw->inflight);
+               WARN_ON_ONCE(inflight < 0);
+               if (iolat->min_lat_nsec == 0)
                        goto next;
                iolatency_record_time(iolat, &bio->bi_issue, now,
                                      issue_as_root);
@@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q)
        return 0;
 }
 
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * Return 1 when iolatency is being enabled, -1 when it is being disabled,
+ * and 0 when the enabled state does not change.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
 {
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
-       struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;
 
        iolat->min_lat_nsec = val;
@@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
                                    BLKIOLATENCY_MAX_WIN_SIZE);
 
        if (!oldval && val)
-               atomic_inc(&blkiolat->enabled);
+               return 1;
        if (oldval && !val)
-               atomic_dec(&blkiolat->enabled);
+               return -1;
+       return 0;
 }
 
 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
        u64 lat_val = 0;
        u64 oldval;
        int ret;
+       int enable = 0;
 
        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
@@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;
 
-       iolatency_set_min_lat_nsec(blkg, lat_val);
+       enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+       if (enable) {
+               WARN_ON_ONCE(!blk_get_queue(blkg->q));
+               blkg_get(blkg);
+       }
+
        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }
@@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
        ret = 0;
 out:
        blkg_conf_finish(&ctx);
+       if (ret == 0 && enable) {
+               struct iolatency_grp *tmp = blkg_to_lat(blkg);
+               struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+               blk_mq_freeze_queue(blkg->q);
+
+               if (enable == 1)
+                       atomic_inc(&blkiolat->enabled);
+               else if (enable == -1)
+                       atomic_dec(&blkiolat->enabled);
+               else
+                       WARN_ON_ONCE(1);
+
+               blk_mq_unfreeze_queue(blkg->q);
+
+               blkg_put(blkg);
+               blk_put_queue(blkg->q);
+       }
        return ret ?: nbytes;
 }
 
@@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
 {
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
+       struct blk_iolatency *blkiolat = iolat->blkiolat;
+       int ret;
 
-       iolatency_set_min_lat_nsec(blkg, 0);
+       ret = iolatency_set_min_lat_nsec(blkg, 0);
+       if (ret == 1)
+               atomic_inc(&blkiolat->enabled);
+       if (ret == -1)
+               atomic_dec(&blkiolat->enabled);
        iolatency_clear_scaling(blkg);
 }
 
index f8120832ca7b8ea44cf872c3810dd373a662656b..7921573aebbc28abd0a57a999792ede908acfd6c 100644 (file)
@@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
 static bool debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
 {
+       if (IS_ERR_OR_NULL(parent))
+               return false;
+
        d_inode(parent)->i_private = data;
 
        for (; attr->name; attr++) {
index d943d46b078547e5f48d488575be5178fb0195e5..d0b3dd54ef8dd81cadbde86b8397c4d8e7c21025 100644 (file)
@@ -36,7 +36,6 @@ struct blk_mq_ctx {
        struct kobject          kobj;
 } ____cacheline_aligned_in_smp;
 
-void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
index 5c093ce01bcd8d2102cf14242096c17e3b670f8e..147f6c7ea59c80327babafc45726ffb91c3ad70c 100644 (file)
@@ -1029,6 +1029,9 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
+       /* Initialize debug output. Linux does not use ACPICA defaults */
+       acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
+
 #ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
index cdfc87629efb833213687c8996b17133bea19595..4d2b2ad1ee0e14c65b067077bfb560099ced6f1e 100644 (file)
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name)
 static int __init binder_init(void)
 {
        int ret;
-       char *device_name, *device_names, *device_tmp;
+       char *device_name, *device_tmp;
        struct binder_device *device;
        struct hlist_node *tmp;
+       char *device_names = NULL;
 
        ret = binder_alloc_shrinker_init();
        if (ret)
@@ -5898,23 +5899,29 @@ static int __init binder_init(void)
                                    &transaction_log_fops);
        }
 
-       /*
-        * Copy the module_parameter string, because we don't want to
-        * tokenize it in-place.
-        */
-       device_names = kstrdup(binder_devices_param, GFP_KERNEL);
-       if (!device_names) {
-               ret = -ENOMEM;
-               goto err_alloc_device_names_failed;
-       }
+       if (strcmp(binder_devices_param, "") != 0) {
+               /*
+                * Copy the module_parameter string, because we don't want to
+                * tokenize it in-place.
+                */
+               device_names = kstrdup(binder_devices_param, GFP_KERNEL);
+               if (!device_names) {
+                       ret = -ENOMEM;
+                       goto err_alloc_device_names_failed;
+               }
 
-       device_tmp = device_names;
-       while ((device_name = strsep(&device_tmp, ","))) {
-               ret = init_binder_device(device_name);
-               if (ret)
-                       goto err_init_binder_device_failed;
+               device_tmp = device_names;
+               while ((device_name = strsep(&device_tmp, ","))) {
+                       ret = init_binder_device(device_name);
+                       if (ret)
+                               goto err_init_binder_device_failed;
+               }
        }
 
+       ret = init_binderfs();
+       if (ret)
+               goto err_init_binder_device_failed;
+
        return ret;
 
 err_init_binder_device_failed:
index 7fb97f503ef2b3c56c4f67fd3e2cb4e3e110fb3e..045b3e42d98b8336a9d069a79f38f5803402072a 100644 (file)
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
 }
 #endif
 
+#ifdef CONFIG_ANDROID_BINDERFS
+extern int __init init_binderfs(void);
+#else
+static inline int __init init_binderfs(void)
+{
+       return 0;
+}
+#endif
+
 #endif /* _LINUX_BINDER_INTERNAL_H */
index 6a2185eb66c59761e5d4f0d0f09120806c12dc8b..e773f45d19d932dc1c8f997b5ea9ab50e4291c70 100644 (file)
@@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
        struct inode *inode = NULL;
        struct dentry *root = sb->s_root;
        struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+       bool use_reserve = true;
+#endif
 
        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
@@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 
        /* Reserve a new minor number for the new device. */
        mutex_lock(&binderfs_minors_mutex);
-       minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
+       minor = ida_alloc_max(&binderfs_minors,
+                             use_reserve ? BINDERFS_MAX_MINOR :
+                                           BINDERFS_MAX_MINOR_CAPPED,
+                             GFP_KERNEL);
        mutex_unlock(&binderfs_minors_mutex);
        if (minor < 0) {
                ret = minor;
@@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = {
        .fs_flags       = FS_USERNS_MOUNT,
 };
 
-static int __init init_binderfs(void)
+int __init init_binderfs(void)
 {
        int ret;
 
@@ -560,5 +568,3 @@ static int __init init_binderfs(void)
 
        return ret;
 }
-
-device_initcall(init_binderfs);
index b8c3f9e6af8994820c30b40889154f87511014e0..adf28788cab52c1feba478c9eb0b9742f9d78e79 100644 (file)
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
        { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
        { "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
+       { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
 
        /* devices that don't properly handle queued TRIM commands */
        { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
index cf78fa6d470d471311dbc372d186ec27f74dc10d..a7359535caf5dfad5ec0c0a95da54d59174b0159 100644 (file)
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;
 
-       if (of_property_read_u32(np, propname, &this_leaf->size))
-               this_leaf->size = 0;
+       of_property_read_u32(np, propname, &this_leaf->size);
 }
 
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;
 
-       if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
-               this_leaf->number_of_sets = 0;
+       of_property_read_u32(np, propname, &this_leaf->number_of_sets);
 }
 
 static void cache_associativity(struct cacheinfo *this_leaf)
index 4e557684f792d74f493abf922fbef63ed9045b63..fe69dccfa0c059f14b2a451974fceb8a0575acef 100644 (file)
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
        u32                             save_cim;
        u32                             save_cnda;
        u32                             save_cndc;
+       u32                             irq_status;
        unsigned long                   status;
        struct tasklet_struct           tasklet;
        struct dma_slave_config         sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
        struct at_xdmac_desc    *desc;
        u32                     error_mask;
 
-       dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
-                __func__, atchan->status);
+       dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+               __func__, atchan->irq_status);
 
        error_mask = AT_XDMAC_CIS_RBEIS
                     | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
 
        if (at_xdmac_chan_is_cyclic(atchan)) {
                at_xdmac_handle_cyclic(atchan);
-       } else if ((atchan->status & AT_XDMAC_CIS_LIS)
-                  || (atchan->status & error_mask)) {
+       } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+                  || (atchan->irq_status & error_mask)) {
                struct dma_async_tx_descriptor  *txd;
 
-               if (atchan->status & AT_XDMAC_CIS_RBEIS)
+               if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
                        dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-               if (atchan->status & AT_XDMAC_CIS_WBEIS)
+               if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
                        dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-               if (atchan->status & AT_XDMAC_CIS_ROIS)
+               if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
                        dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
                spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
                        atchan = &atxdmac->chan[i];
                        chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
                        chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
-                       atchan->status = chan_status & chan_imr;
+                       atchan->irq_status = chan_status & chan_imr;
                        dev_vdbg(atxdmac->dma.dev,
                                 "%s: chan%d: imr=0x%x, status=0x%x\n",
                                 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
                                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
-                       if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+                       if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
                                at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
 
                        tasklet_schedule(&atchan->tasklet);
index 1a44c8086d77a9e23d16a19f14033963ffb14ef7..ae10f5614f953e5fee68a60b85bddff0947b238e 100644 (file)
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
        }
 }
 
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
 {
-       unsigned long cs;
+       void __iomem *chan_base = c->chan_base;
        long int timeout = 10000;
 
-       cs = readl(chan_base + BCM2835_DMA_CS);
-       if (!(cs & BCM2835_DMA_ACTIVE))
+       /*
+        * A zero control block address means the channel is idle.
+        * (The ACTIVE flag in the CS register is not a reliable indicator.)
+        */
+       if (!readl(chan_base + BCM2835_DMA_ADDR))
                return 0;
 
        /* Write 0 to the active bit - Pause the DMA */
        writel(0, chan_base + BCM2835_DMA_CS);
 
        /* Wait for any current AXI transfer to complete */
-       while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+       while ((readl(chan_base + BCM2835_DMA_CS) &
+               BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
                cpu_relax();
-               cs = readl(chan_base + BCM2835_DMA_CS);
-       }
 
-       /* We'll un-pause when we set of our next DMA */
+       /* Peripheral might be stuck and fail to signal AXI write responses */
        if (!timeout)
-               return -ETIMEDOUT;
-
-       if (!(cs & BCM2835_DMA_ACTIVE))
-               return 0;
-
-       /* Terminate the control block chain */
-       writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
-       /* Abort the whole DMA */
-       writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
-              chan_base + BCM2835_DMA_CS);
+               dev_err(c->vc.chan.device->dev,
+                       "failed to complete outstanding writes\n");
 
+       writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
        return 0;
 }
 
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 
        spin_lock_irqsave(&c->vc.lock, flags);
 
-       /* Acknowledge interrupt */
-       writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+       /*
+        * Clear the INT flag to receive further interrupts. Keep the channel
+        * active in case the descriptor is cyclic or in case the client has
+        * already terminated the descriptor and issued a new one. (May happen
+        * if this IRQ handler is threaded.) If the channel is finished, it
+        * will remain idle despite the ACTIVE flag being set.
+        */
+       writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+              c->chan_base + BCM2835_DMA_CS);
 
        d = c->desc;
 
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
                if (d->cyclic) {
                        /* call the cyclic callback */
                        vchan_cyclic_callback(&d->vd);
-
-                       /* Keep the DMA engine running */
-                       writel(BCM2835_DMA_ACTIVE,
-                              c->chan_base + BCM2835_DMA_CS);
-               } else {
+               } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
                        vchan_cookie_complete(&c->desc->vd);
                        bcm2835_dma_start_desc(c);
                }
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
        struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
        unsigned long flags;
-       int timeout = 10000;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
        list_del_init(&c->node);
        spin_unlock(&d->lock);
 
-       /*
-        * Stop DMA activity: we assume the callback will not be called
-        * after bcm_dma_abort() returns (even if it does, it will see
-        * c->desc is NULL and exit.)
-        */
+       /* stop DMA activity */
        if (c->desc) {
                vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
-               bcm2835_dma_abort(c->chan_base);
-
-               /* Wait for stopping */
-               while (--timeout) {
-                       if (!(readl(c->chan_base + BCM2835_DMA_CS) &
-                                               BCM2835_DMA_ACTIVE))
-                               break;
-
-                       cpu_relax();
-               }
-
-               if (!timeout)
-                       dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+               bcm2835_dma_abort(c);
        }
 
        vchan_get_all_descriptors(&c->vc, &head);
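
The rewritten abort path above bounds its busy-wait instead of spinning forever on a
channel that never reports idle. A toy, self-contained sketch of that bounded-poll shape
(all names here are invented; writes_pending stands in for reading the CS register):

#include <stdbool.h>
#include <stdio.h>

static bool writes_pending(int *fake_reg)
{
	/* stand-in for readl(chan_base + BCM2835_DMA_CS) & ..._WAITING_FOR_WRITES */
	return (*fake_reg)-- > 0;
}

static void drain_writes(int *fake_reg)
{
	long timeout = 10000;

	while (writes_pending(fake_reg) && --timeout)
		;	/* cpu_relax() in the kernel version */

	if (!timeout)
		fprintf(stderr, "failed to complete outstanding writes\n");
	else
		printf("drained with %ld polls to spare\n", timeout);
}

int main(void)
{
	int pending = 5;	/* pretend five AXI write responses are outstanding */

	drain_writes(&pending);
	return 0;
}
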
index 2eea4ef729153637d79fe187bb02b2645f92ca14..6511928b4cdfe11c944223a8c7e3e8b0f8b9cc96 100644 (file)
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
                        srcs[i] = um->addr[i] + src_off;
                        ret = dma_mapping_error(dev->dev, um->addr[i]);
                        if (ret) {
-                               dmaengine_unmap_put(um);
                                result("src mapping error", total_tests,
                                       src_off, dst_off, len, ret);
-                               failed_tests++;
-                               continue;
+                               goto error_unmap_continue;
                        }
                        um->to_cnt++;
                }
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
                                               DMA_BIDIRECTIONAL);
                        ret = dma_mapping_error(dev->dev, dsts[i]);
                        if (ret) {
-                               dmaengine_unmap_put(um);
                                result("dst mapping error", total_tests,
                                       src_off, dst_off, len, ret);
-                               failed_tests++;
-                               continue;
+                               goto error_unmap_continue;
                        }
                        um->bidi_cnt++;
                }
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
                }
 
                if (!tx) {
-                       dmaengine_unmap_put(um);
                        result("prep error", total_tests, src_off,
                               dst_off, len, ret);
                        msleep(100);
-                       failed_tests++;
-                       continue;
+                       goto error_unmap_continue;
                }
 
                done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
                cookie = tx->tx_submit(tx);
 
                if (dma_submit_error(cookie)) {
-                       dmaengine_unmap_put(um);
                        result("submit error", total_tests, src_off,
                               dst_off, len, ret);
                        msleep(100);
-                       failed_tests++;
-                       continue;
+                       goto error_unmap_continue;
                }
                dma_async_issue_pending(chan);
 
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
 
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
-               dmaengine_unmap_put(um);
-
                if (!done->done) {
                        result("test timed out", total_tests, src_off, dst_off,
                               len, 0);
-                       failed_tests++;
-                       continue;
+                       goto error_unmap_continue;
                } else if (status != DMA_COMPLETE) {
                        result(status == DMA_ERROR ?
                               "completion error status" :
                               "completion busy status", total_tests, src_off,
                               dst_off, len, ret);
-                       failed_tests++;
-                       continue;
+                       goto error_unmap_continue;
                }
 
+               dmaengine_unmap_put(um);
+
                if (params->noverify) {
                        verbose_result("test passed", total_tests, src_off,
                                       dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
                        verbose_result("test passed", total_tests, src_off,
                                       dst_off, len, 0);
                }
+
+               continue;
+
+error_unmap_continue:
+               dmaengine_unmap_put(um);
+               failed_tests++;
        }
        ktime = ktime_sub(ktime_get(), ktime);
        ktime = ktime_sub(ktime, comparetime);
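
The dmatest rework above funnels every failure path through the single
error_unmap_continue label, so the dmaengine_unmap_put() and failed_tests++ pair can no
longer be forgotten on one branch. A compact, self-contained sketch of that goto-cleanup
pattern (every name below is invented for the illustration):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int failed_tests;

static void *map_bufs(void)	{ return malloc(16); }
static void unmap_bufs(void *m)	{ free(m); }
static bool submit(void *m)	{ return m != NULL; }	/* stands in for tx_submit() */

static void do_one_test(bool force_error)
{
	void *um = map_bufs();

	if (!um)
		goto error_unmap_continue;
	if (force_error || !submit(um))
		goto error_unmap_continue;

	unmap_bufs(um);		/* the success path releases the mapping itself */
	printf("test passed\n");
	return;

error_unmap_continue:
	unmap_bufs(um);
	failed_tests++;
}

int main(void)
{
	do_one_test(false);
	do_one_test(true);
	printf("failed_tests = %d\n", failed_tests);
	return 0;
}
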
index c2fff3f6c9ca5f49054e1fd41e94d36b849669a9..4a09af3cd546a4c3569d3ed828fa5407fc5aadc8 100644 (file)
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
 {
        struct imxdma_channel *imxdmac = (void *)data;
        struct imxdma_engine *imxdma = imxdmac->imxdma;
-       struct imxdma_desc *desc;
+       struct imxdma_desc *desc, *next_desc;
        unsigned long flags;
 
        spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
        list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
 
        if (!list_empty(&imxdmac->ld_queue)) {
-               desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
-                                       node);
+               next_desc = list_first_entry(&imxdmac->ld_queue,
+                                            struct imxdma_desc, node);
                list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
-               if (imxdma_xfer_desc(desc) < 0)
+               if (imxdma_xfer_desc(next_desc) < 0)
                        dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
                                 __func__, imxdmac->channel);
        }
index 472c88ae1c0f9d37ffae6d094a8500ca85ed3146..92f843eaf1e0156cf56d7839cec8d766bff6e86f 100644 (file)
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
 }
 EXPORT_SYMBOL_GPL(scmi_driver_unregister);
 
+static void scmi_device_release(struct device *dev)
+{
+       kfree(to_scmi_dev(dev));
+}
+
 struct scmi_device *
 scmi_device_create(struct device_node *np, struct device *parent, int protocol)
 {
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
        scmi_dev->dev.parent = parent;
        scmi_dev->dev.of_node = np;
        scmi_dev->dev.bus = &scmi_bus_type;
+       scmi_dev->dev.release = scmi_device_release;
        dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
 
        retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@ free_mem:
 void scmi_device_destroy(struct scmi_device *scmi_dev)
 {
        scmi_handle_put(scmi_dev->handle);
-       device_unregister(&scmi_dev->dev);
        ida_simple_remove(&scmi_bus_id, scmi_dev->id);
-       kfree(scmi_dev);
+       device_unregister(&scmi_dev->dev);
 }
 
 void scmi_set_handle(struct scmi_device *scmi_dev)
index a1a09e04fab89de2f1b5399a91ddc85da3f78e96..13851b3d1c565dc9e3878efc53474915b4eec9d3 100644 (file)
@@ -508,14 +508,11 @@ static int __init s10_init(void)
                return -ENODEV;
 
        np = of_find_matching_node(fw_np, s10_of_match);
-       if (!np) {
-               of_node_put(fw_np);
+       if (!np)
                return -ENODEV;
-       }
 
        of_node_put(np);
        ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
-       of_node_put(fw_np);
        if (ret)
                return ret;
 
index 24a7504365596c32c60333ad04e25ba1c26306bf..f91e02c87fd8ca6a28a8005be5899d2fae78cc09 100644 (file)
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
        if (mode->hsync)
                return mode->hsync;
 
-       if (mode->htotal < 0)
+       if (mode->htotal <= 0)
                return 0;
 
        calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
index 216f52b744a637fca6406fe31a25f5d5befa4c2c..c882ea94172c46a645855193d6caaf2dabf968d6 100644 (file)
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+             unsigned long addr, unsigned long size)
+{
+       if (vma->vm_file != filp)
+               return false;
+
+       return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
 /**
  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  *                      it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        return -EINTR;
                }
                vma = find_vma(mm, addr);
-               if (vma)
+               if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
                        vma->vm_page_prot =
                                pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                else
index d6c8f8fdfda5f106776e0a148e034e10e64ccbb7..017fc602a10e838586c40a5a5e7dbbd43e433376 100644 (file)
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event)
         * Update the bitmask of enabled events and increment
         * the event reference counter.
         */
-       GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+       BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
+       GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
        GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
        i915->pmu.enable |= BIT_ULL(bit);
        i915->pmu.enable_count[bit]++;
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event)
                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
-               GEM_BUG_ON(!engine);
-               engine->pmu.enable |= BIT(sample);
 
-               GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+               BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+                            I915_ENGINE_SAMPLE_COUNT);
+               BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+                            I915_ENGINE_SAMPLE_COUNT);
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+               engine->pmu.enable |= BIT(sample);
                engine->pmu.enable_count[sample]++;
        }
 
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event)
                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));
-               GEM_BUG_ON(!engine);
-               GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+               GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
                /*
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event)
                        engine->pmu.enable &= ~BIT(sample);
        }
 
-       GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+       GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
        GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
        /*
         * Decrement the reference count and clear the enabled
index 7f164ca3db129472d3262439f5290d505ea6e14a..b3728c5f13e739f7c3a1e5c0d5e1a55deca1c342 100644 (file)
@@ -31,6 +31,8 @@ enum {
        ((1 << I915_PMU_SAMPLE_BITS) + \
         (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
 
+#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
+
 struct i915_pmu_sample {
        u64 cur;
 };
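
The I915_ENGINE_SAMPLE_COUNT added above gives the .c file a constant it can compare
against its array sizes at build time. A minimal stand-alone analogue of that
compile-time check (enum and array names are invented here, and _Static_assert plays the
role the driver's BUILD_BUG_ON plays):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define ENGINE_SAMPLE_SLOTS 3		/* must track enum engine_sample */

enum engine_sample { SAMPLE_BUSY, SAMPLE_WAIT, SAMPLE_SEMA, SAMPLE_COUNT };

static unsigned int enable_count[ENGINE_SAMPLE_SLOTS];

/* Compilation fails if the enum grows without the array being resized. */
_Static_assert(ARRAY_SIZE(enable_count) == SAMPLE_COUNT,
	       "enable_count[] out of sync with enum engine_sample");

int main(void)
{
	enable_count[SAMPLE_BUSY]++;
	printf("busy refs: %u\n", enable_count[SAMPLE_BUSY]);
	return 0;
}
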
index 0a7d60509ca7527f018a12208316bff91c7c6be8..067054cf4a864e26d6fa29d33056c34c218833c3 100644 (file)
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_C_LN0_OFFSET              0x162C40
 #define _CNL_PORT_TX_D_LN0_OFFSET              0x162E40
 #define _CNL_PORT_TX_F_LN0_OFFSET              0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw)  (_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_B_GRP_OFFSET, \
                                               _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_F_GRP_OFFSET) + \
                                               4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw)  (_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
 
 #define _CNL_PORT_TX_DW4_LN0_AE                0x162450
 #define _CNL_PORT_TX_DW4_LN1_AE                0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
                                           ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                    _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
 #define   RTERM_SELECT(x)              ((x) << 3)
 #define   RTERM_SELECT_MASK            (0x7 << 3)
 
-#define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port)      _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)      _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)      _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)   _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)                  ((x) << 24)
 #define   N_SCALAR_MASK                        (0x7F << 24)
 
index f3e1d6a0b7dda7cabb0cc882e40aecbd83a1501c..7edce1b7b348e0d8301a47aa3a7e7ba25da3d142 100644 (file)
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
        { 0x2, 0x7F, 0x3F, 0x00, 0x00 },        /* 400   400      0.0   */
 };
 
-struct icl_combo_phy_ddi_buf_trans {
-       u32 dw2_swing_select;
-       u32 dw2_swing_scalar;
-       u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0xB, 0x70, 0x0018 },  /* 600         0.0   */
-       { 0xB, 0x70, 0x3015 },  /* 600         3.5   */
-       { 0xB, 0x70, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x00, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x00, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-       { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-       { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x76, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x76, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+       { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+       { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+       { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+       { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+       { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+                                               /* NT mV Trans mV db    */
+       { 0x0, 0x7F, 0x3F, 0x00, 0x00 },        /* 200   200      0.0   */
+       { 0x8, 0x7F, 0x38, 0x00, 0x07 },        /* 200   250      1.9   */
+       { 0x1, 0x7F, 0x33, 0x00, 0x0C },        /* 200   300      3.5   */
+       { 0x9, 0x7F, 0x31, 0x00, 0x0E },        /* 200   350      4.9   */
+       { 0x8, 0x7F, 0x3F, 0x00, 0x00 },        /* 250   250      0.0   */
+       { 0x1, 0x7F, 0x38, 0x00, 0x07 },        /* 250   300      1.6   */
+       { 0x9, 0x7F, 0x35, 0x00, 0x0A },        /* 250   350      2.9   */
+       { 0x1, 0x7F, 0x3F, 0x00, 0x00 },        /* 300   300      0.0   */
+       { 0x9, 0x7F, 0x38, 0x00, 0x07 },        /* 300   350      1.3   */
+       { 0x9, 0x7F, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
 };
 
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
-                               /* Voltage mV  db    */
-       { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-       { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-       { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-       { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-       { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-       { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-       { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-       { 0x5, 0x71, 0x0018 },  /* 800         0.0   */
-       { 0x5, 0x71, 0x3015 },  /* 800         3.5   */
-       { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+       { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+       { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+       { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+       { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+       { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+       { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+       { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
-                               /* Voltage mV  db    */
-       { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-       { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-       { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+                                               /* NT mV Trans mV db    */
+       { 0xA, 0x60, 0x3F, 0x00, 0x00 },        /* 450   450      0.0   */
+       { 0xB, 0x73, 0x36, 0x00, 0x09 },        /* 450   650      3.2   */
+       { 0x6, 0x7F, 0x31, 0x00, 0x0E },        /* 450   850      5.5   */
+       { 0xB, 0x73, 0x3F, 0x00, 0x00 },        /* 650   650      0.0   ALS */
+       { 0x6, 0x7F, 0x37, 0x00, 0x08 },        /* 650   850      2.3   */
+       { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 850   850      0.0   */
+       { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   850      3.0   */
 };
 
 struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
        }
 }
 
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
 icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-                       int type, int *n_entries)
+                       int type, int rate, int *n_entries)
 {
-       u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
-       if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-               switch (voltage) {
-               case VOLTAGE_INFO_0_85V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
-                       return icl_combo_phy_ddi_translations_edp_0_85V;
-               case VOLTAGE_INFO_0_95V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
-                       return icl_combo_phy_ddi_translations_edp_0_95V;
-               case VOLTAGE_INFO_1_05V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
-                       return icl_combo_phy_ddi_translations_edp_1_05V;
-               default:
-                       MISSING_CASE(voltage);
-                       return NULL;
-               }
-       } else {
-               switch (voltage) {
-               case VOLTAGE_INFO_0_85V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
-               case VOLTAGE_INFO_0_95V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
-               case VOLTAGE_INFO_1_05V:
-                       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
-                       return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
-               default:
-                       MISSING_CASE(voltage);
-                       return NULL;
-               }
+       if (type == INTEL_OUTPUT_HDMI) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+               return icl_combo_phy_ddi_translations_hdmi;
+       } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+               return icl_combo_phy_ddi_translations_edp_hbr3;
+       } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+               *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+               return icl_combo_phy_ddi_translations_edp_hbr2;
        }
+
+       *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+       return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
        if (IS_ICELAKE(dev_priv)) {
                if (intel_port_is_combophy(dev_priv, port))
-                       icl_get_combo_buf_trans(dev_priv, port,
-                                               INTEL_OUTPUT_HDMI, &n_entries);
+                       icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+                                               0, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
                default_entry = n_entries - 1;
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
                        return DDI_CLK_SEL_TBT_810;
                default:
                        MISSING_CASE(clock);
-                       break;
+                       return DDI_CLK_SEL_NONE;
                }
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
        int n_entries;
 
        if (IS_ICELAKE(dev_priv)) {
                if (intel_port_is_combophy(dev_priv, port))
                        icl_get_combo_buf_trans(dev_priv, port, encoder->type,
-                                               &n_entries);
+                                               intel_dp->link_rate, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
        } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-                                        u32 level, enum port port, int type)
+                                       u32 level, enum port port, int type,
+                                       int rate)
 {
-       const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+       const struct cnl_ddi_buf_trans *ddi_translations = NULL;
        u32 n_entries, val;
        int ln;
 
        ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-                                                  &n_entries);
+                                                  rate, &n_entries);
        if (!ddi_translations)
                return;
 
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                level = n_entries - 1;
        }
 
-       /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+       /* Set PORT_TX_DW5 */
        val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-       val &= ~RTERM_SELECT_MASK;
+       val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+                 TAP2_DISABLE | TAP3_DISABLE);
+       val |= SCALING_MODE_SEL(0x2);
        val |= RTERM_SELECT(0x6);
-       I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
-       /* Program PORT_TX_DW5 */
-       val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-       /* Set DisableTap2 and DisableTap3 if MIPI DSI
-        * Clear DisableTap2 and DisableTap3 for all other Ports
-        */
-       if (type == INTEL_OUTPUT_DSI) {
-               val |= TAP2_DISABLE;
-               val |= TAP3_DISABLE;
-       } else {
-               val &= ~TAP2_DISABLE;
-               val &= ~TAP3_DISABLE;
-       }
+       val |= TAP3_DISABLE;
        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
        /* Program PORT_TX_DW2 */
        val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
        val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                 RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
-       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
        /* Program Rcomp scalar for every table entry */
-       val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+       val |= RCOMP_SCALAR(0x98);
        I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
 
        /* Program PORT_TX_DW4 */
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
-               val |= ddi_translations[level].dw4_scaling;
+               val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+               val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+               val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
                I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
        }
+
+       /* Program PORT_TX_DW7 */
+       val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+       val &= ~N_SCALAR_MASK;
+       val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+       I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
 }
 
 static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
        /* 5. Program swing and de-emphasis */
-       icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+       icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
 
        /* 6. Set training enable to trigger update */
        val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
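
The rewritten icl_get_combo_buf_trans() above no longer keys the table choice off the VccIO voltage; it picks purely by output type, link rate and the VBT low-vswing flag, and always falls back to the DP HBR2 table instead of returning NULL. A small standalone sketch of that decision order (enum values and table names are stand-ins, not the driver's):

#include <stdio.h>

enum output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI };

/* Mirrors the priority order in icl_get_combo_buf_trans(): HDMI first,
 * then eDP at HBR3 rates, then low-vswing eDP, then the DP default. */
static const char *pick_table(enum output_type type, int rate, int low_vswing)
{
        if (type == OUTPUT_HDMI)
                return "hdmi";
        if (type == OUTPUT_EDP && rate > 540000)
                return "edp_hbr3";
        if (type == OUTPUT_EDP && low_vswing)
                return "edp_hbr2";
        return "dp_hbr2";
}

int main(void)
{
        printf("%s\n", pick_table(OUTPUT_HDMI, 0, 0));          /* hdmi     */
        printf("%s\n", pick_table(OUTPUT_EDP, 810000, 1));      /* edp_hbr3 */
        printf("%s\n", pick_table(OUTPUT_EDP, 270000, 1));      /* edp_hbr2 */
        printf("%s\n", pick_table(OUTPUT_DP, 810000, 0));       /* dp_hbr2  */
        return 0;
}
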
index 3da9c0f9e9485c7c4b9ccf8fefe5c71f72f1ea02..248128126422be28214b2b3c797cc31b16fe8ca3 100644 (file)
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
        }
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+       /*
+        * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
+        * the hardware when a high res display is plugged in. DPLL P
+        * divider is zero, and the pipe timings are bonkers. We'll
+        * try to disable everything in that case.
+        *
+        * FIXME would be nice to be able to sanitize this state
+        * without several WARNs, but for now let's take the easy
+        * road.
+        */
+       return IS_GEN6(dev_priv) &&
+               crtc_state->base.active &&
+               crtc_state->shared_dpll &&
+               crtc_state->port_clock == 0;
+}
+
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_connector *connector;
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc_state *crtc_state = crtc ?
+               to_intel_crtc_state(crtc->base.state) : NULL;
 
        /* We need to check both for a crtc link (meaning that the
         * encoder is active and trying to read from a pipe) and the
         * pipe itself being active. */
-       bool has_active_crtc = encoder->base.crtc &&
-               to_intel_crtc(encoder->base.crtc)->active;
+       bool has_active_crtc = crtc_state &&
+               crtc_state->base.active;
+
+       if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+               DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+                             pipe_name(crtc->pipe));
+               has_active_crtc = false;
+       }
 
        connector = intel_encoder_find_connector(encoder);
        if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                /* Connector is active, but has no active pipe. This is
                 * fallout from our resume register restoring. Disable
                 * the encoder manually again. */
-               if (encoder->base.crtc) {
-                       struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+               if (crtc_state) {
+                       struct drm_encoder *best_encoder;
 
                        DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                      encoder->base.base.id,
                                      encoder->base.name);
+
+                       /* avoid oopsing in case the hooks consult best_encoder */
+                       best_encoder = connector->base.state->best_encoder;
+                       connector->base.state->best_encoder = &encoder->base;
+
                        if (encoder->disable)
-                               encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                               encoder->disable(encoder, crtc_state,
+                                                connector->base.state);
                        if (encoder->post_disable)
-                               encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                               encoder->post_disable(encoder, crtc_state,
+                                                     connector->base.state);
+
+                       connector->base.state->best_encoder = best_encoder;
                }
                encoder->base.crtc = NULL;
 
index fdd2cbc56fa335b6bf084c00451eca8a0ef957e3..22a74608c6e493192b2f1e144c11f961ac6af4d0 100644 (file)
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
        enum port port = dig_port->base.port;
 
-       if (port == PORT_B)
+       if (intel_port_is_combophy(dev_priv, port) &&
+           !intel_dp_is_edp(intel_dp))
                return 540000;
 
        return 810000;
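
icl_max_source_rate() above now keys the cap off the PHY type rather than a hard-coded port: DP on a combo PHY tops out at 540000 (HBR2) while eDP and the MG PHY ports may advertise 810000 (HBR3). A tiny sketch of that rule (the two booleans stand in for the intel_port_is_combophy()/intel_dp_is_edp() helpers):

#include <stdbool.h>
#include <stdio.h>

static int icl_max_source_rate(bool is_combo_phy, bool is_edp)
{
        if (is_combo_phy && !is_edp)
                return 540000;  /* DP on a combo PHY: HBR2 */
        return 810000;          /* eDP or MG PHY: HBR3 */
}

int main(void)
{
        printf("combo DP : %d\n", icl_max_source_rate(true, false));
        printf("combo eDP: %d\n", icl_max_source_rate(true, true));
        printf("MG DP    : %d\n", icl_max_source_rate(false, false));
        return 0;
}
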
index f94a04b4ad8788bcc1d8f9ca3073eaa00f33648a..e9ddeaf05a1413f022b569c49ded1a9f92102d32 100644 (file)
@@ -209,6 +209,16 @@ struct intel_fbdev {
        unsigned long vma_flags;
        async_cookie_t cookie;
        int preferred_bpp;
+
+       /* Whether or not fbdev hpd processing is temporarily suspended */
+       bool hpd_suspended : 1;
+       /* Set when a hotplug was received while HPD processing was
+        * suspended
+        */
+       bool hpd_waiting : 1;
+
+       /* Protects hpd_suspended */
+       struct mutex hpd_lock;
 };
 
 struct intel_encoder {
index fb5bb5b32a6034d152516ae11c15913e2f97597d..7f365ac0b549d557447352906d670da18ae36a1f 100644 (file)
@@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev)
        if (ifbdev == NULL)
                return -ENOMEM;
 
+       mutex_init(&ifbdev->hpd_lock);
        drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
 
        if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
        intel_fbdev_destroy(ifbdev);
 }
 
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+       bool send_hpd = false;
+
+       mutex_lock(&ifbdev->hpd_lock);
+       ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+       send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+       ifbdev->hpd_waiting = false;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd) {
+               DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+               drm_fb_helper_hotplug_event(&ifbdev->helper);
+       }
+}
+
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
                 */
                if (state != FBINFO_STATE_RUNNING)
                        flush_work(&dev_priv->fbdev_suspend_work);
+
                console_lock();
        } else {
                /*
@@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
        drm_fb_helper_set_suspend(&ifbdev->helper, state);
        console_unlock();
+
+       intel_fbdev_hpd_set_suspend(ifbdev, state);
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+       bool send_hpd;
 
        if (!ifbdev)
                return;
 
        intel_fbdev_sync(ifbdev);
-       if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+       mutex_lock(&ifbdev->hpd_lock);
+       send_hpd = !ifbdev->hpd_suspended;
+       ifbdev->hpd_waiting = true;
+       mutex_unlock(&ifbdev->hpd_lock);
+
+       if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index b8f106d9ecf8b1be6286b2dfaeac1c5188629528..3ac20153705a22d27ee2e8f4ae5b112ab40f42c3 100644 (file)
 struct opregion_header {
        u8 signature[16];
        u32 size;
-       u32 opregion_ver;
+       struct {
+               u8 rsvd;
+               u8 revision;
+               u8 minor;
+               u8 major;
+       }  __packed over;
        u8 bios_ver[32];
        u8 vbios_ver[16];
        u8 driver_ver[16];
@@ -119,7 +124,8 @@ struct opregion_asle {
        u64 fdss;
        u32 fdsp;
        u32 stat;
-       u64 rvda;       /* Physical address of raw vbt data */
+       u64 rvda;       /* Physical (2.0) or relative from opregion (2.1+)
+                        * address of raw VBT data. */
        u32 rvds;       /* Size of raw vbt data */
        u8 rsvd[58];
 } __packed;
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
        opregion->header = base;
        opregion->lid_state = base + ACPI_CLID;
 
+       DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
+                        opregion->header->over.major,
+                        opregion->header->over.minor,
+                        opregion->header->over.revision);
+
        mboxes = opregion->header->mboxes;
        if (mboxes & MBOX_ACPI) {
                DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
        if (dmi_check_system(intel_no_opregion_vbt))
                goto out;
 
-       if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+       if (opregion->header->over.major >= 2 && opregion->asle &&
            opregion->asle->rvda && opregion->asle->rvds) {
-               opregion->rvda = memremap(opregion->asle->rvda,
-                                         opregion->asle->rvds,
+               resource_size_t rvda = opregion->asle->rvda;
+
+               /*
+                * opregion 2.0: rvda is the physical VBT address.
+                *
+                * opregion 2.1+: rvda is unsigned, relative offset from
+                * opregion base, and should never point within opregion.
+                */
+               if (opregion->header->over.major > 2 ||
+                   opregion->header->over.minor >= 1) {
+                       WARN_ON(rvda < OPREGION_SIZE);
+
+                       rvda += asls;
+               }
+
+               opregion->rvda = memremap(rvda, opregion->asle->rvds,
                                          MEMREMAP_WB);
+
                vbt = opregion->rvda;
                vbt_size = opregion->asle->rvds;
                if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
                        goto out;
                } else {
                        DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+                       memunmap(opregion->rvda);
+                       opregion->rvda = NULL;
                }
        }
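
The RVDA handling above is versioned: with OpRegion 2.0 the field is a physical address of the raw VBT, while from 2.1 on it is an unsigned offset from the OpRegion base (asls) that must not point back inside the OpRegion, so the base is added before memremap(). A small sketch of that address computation (the 8 KiB OPREGION_SIZE is assumed to match the driver's definition; the firmware values in main() are invented):

#include <stdint.h>
#include <stdio.h>

#define OPREGION_SIZE (8 * 1024)   /* assumed 8 KiB, as in the driver */

/*
 * OpRegion 2.0 : rvda is already a physical address.
 * OpRegion 2.1+: rvda is an offset from the OpRegion base (asls) and
 *                should lie beyond the OpRegion itself.
 */
static uint64_t vbt_phys_addr(uint8_t major, uint8_t minor,
                              uint64_t asls, uint64_t rvda)
{
        if (major > 2 || (major == 2 && minor >= 1)) {
                if (rvda < OPREGION_SIZE)
                        fprintf(stderr, "suspicious rvda offset 0x%llx\n",
                                (unsigned long long)rvda);
                return asls + rvda;
        }
        return rvda;    /* 2.0: absolute physical address */
}

int main(void)
{
        uint64_t asls = 0x8c53f000;     /* hypothetical OpRegion base */

        printf("2.0 : 0x%llx\n",
               (unsigned long long)vbt_phys_addr(2, 0, asls, 0x8c55f000));
        printf("2.1+: 0x%llx\n",
               (unsigned long long)vbt_phys_addr(2, 1, asls, 0x2000));
        return 0;
}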
 
index 72edaa7ff4114fc61894f298f8d3952e8c5855c9..a1a7cc29fdd1a5e8131e0b70f8bc40189006cefb 100644 (file)
@@ -415,16 +415,17 @@ struct intel_engine_cs {
                /**
                 * @enable_count: Reference count for the enabled samplers.
                 *
-                * Index number corresponds to the bit number from @enable.
+                * Index number corresponds to @enum drm_i915_pmu_engine_sample.
                 */
-               unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+               unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
                /**
                 * @sample: Counter values for sampling events.
                 *
                 * Our internal timer stores the current counters in this field.
+                *
+                * Index number corresponds to @enum drm_i915_pmu_engine_sample.
                 */
-#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
-               struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+               struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
        } pmu;
 
        /*
index d2e003d8f3dbe64247cac7de9baf6a6fcaa0c986..5170a0f5fe7b6b39ed36e750b853475a7a4a5b0d 100644 (file)
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
 
        keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
 
-       keymsk = key->channel_mask & 0x3ffffff;
+       keymsk = key->channel_mask & 0x7ffffff;
        if (alpha < 0xff)
                keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
 
index 2c5bbe3173537346e1ea0d5c0b0f0e011ea485e7..e31e263cf86bd60464d1daa220ac1d45d1c763fd 100644 (file)
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                int bus_format;
 
                ret = of_property_read_u32(child, "reg", &i);
-               if (ret || i < 0 || i > 1)
-                       return -EINVAL;
+               if (ret || i < 0 || i > 1) {
+                       ret = -EINVAL;
+                       goto free_child;
+               }
 
                if (!of_device_is_available(child))
                        continue;
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                channel = &imx_ldb->channel[i];
                channel->ldb = imx_ldb;
                channel->chno = i;
-               channel->child = child;
 
                /*
                 * The output port is port@4 with an external 4-port mux or
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                                                  imx_ldb->lvds_mux ? 4 : 2, 0,
                                                  &channel->panel, &channel->bridge);
                if (ret && ret != -ENODEV)
-                       return ret;
+                       goto free_child;
 
                /* panel ddc only if there is no bridge */
                if (!channel->bridge) {
                        ret = imx_ldb_panel_ddc(dev, channel, child);
                        if (ret)
-                               return ret;
+                               goto free_child;
                }
 
                bus_format = of_get_bus_format(dev, child);
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                if (bus_format < 0) {
                        dev_err(dev, "could not determine data mapping: %d\n",
                                bus_format);
-                       return bus_format;
+                       ret = bus_format;
+                       goto free_child;
                }
                channel->bus_format = bus_format;
+               channel->child = child;
 
                ret = imx_ldb_register(drm, channel);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       channel->child = NULL;
+                       goto free_child;
+               }
        }
 
        dev_set_drvdata(dev, imx_ldb);
 
        return 0;
+
+free_child:
+       of_node_put(child);
+       return ret;
 }
 
 static void imx_ldb_unbind(struct device *dev, struct device *master,
index c390924de93d978d3acb4200c846fe6b53f753c8..21e964f6ab5cf58559e059d7d34c4bb26359c042 100644 (file)
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
        if (ret)
                return ret;
 
-       /* CRTC should be enabled */
+       /* nothing to check when disabling or disabled */
        if (!crtc_state->enable)
-               return -EINVAL;
+               return 0;
 
        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
index 00a9c2ab9e6c8932baecc9b5591b07626201c589..64fb788b664749473ac89894ae9f1c8eeda5f323 100644 (file)
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
 
 static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 {
-       struct dsi_data *dsi = p;
+       struct dsi_data *dsi = s->private;
        struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
        enum dss_clk_source dispc_clk_src, dsi_clk_src;
        int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 {
-       struct dsi_data *dsi = p;
+       struct dsi_data *dsi = s->private;
        unsigned long flags;
        struct dsi_irq_stats stats;
 
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 
 static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
 {
-       struct dsi_data *dsi = p;
+       struct dsi_data *dsi = s->private;
 
        if (dsi_runtime_get(dsi))
                return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
        dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
        dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
        dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+       /*
+        * HACK: These flags should be handled through the omap_dss_device bus
+        * flags, but this will only be possible when the DSI encoder will be
+        * converted to the omapdrm-managed encoder model.
+        */
+       dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+       dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+       dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+       dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+       dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+       dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
 
        dss_mgr_set_timings(&dsi->output, &dsi->vm);
 
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
 
        snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
        dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
-                                                   dsi_dump_dsi_regs, &dsi);
+                                                   dsi_dump_dsi_regs, dsi);
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
        snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
        dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
-                                                   dsi_dump_dsi_irqs, &dsi);
+                                                   dsi_dump_dsi_irqs, dsi);
 #endif
        snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
        dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
-                                                   dsi_dump_dsi_clocks, &dsi);
+                                                   dsi_dump_dsi_clocks, dsi);
 
        return 0;
 }
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
        dss_debugfs_remove_file(dsi->debugfs.irqs);
        dss_debugfs_remove_file(dsi->debugfs.regs);
 
-       of_platform_depopulate(dev);
-
        WARN_ON(dsi->scp_clk_refcount > 0);
 
        dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
 
        dsi_uninit_output(dsi);
 
+       of_platform_depopulate(&pdev->dev);
+
        pm_runtime_disable(&pdev->dev);
 
        if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
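
The omapdrm DSI debugfs fix above corrects two related mistakes: the dump callbacks read the callback argument p instead of s->private, and dss_debugfs_create_file() was handed &dsi, the address of a local pointer, instead of dsi itself. The difference is easy to demonstrate in isolation (the struct and callback below are stand-ins for the seq_file plumbing):

#include <stdio.h>

struct dsi_data {
        int module_id;
};

/* Imitates a debugfs show callback that receives an opaque pointer. */
static void dump(void *private)
{
        struct dsi_data *dsi = private;

        printf("module_id = %d\n", dsi->module_id);
}

int main(void)
{
        struct dsi_data data = { .module_id = 1 };
        struct dsi_data *dsi = &data;

        dump(dsi);      /* correct: callback sees a struct dsi_data        */
        /* dump(&dsi);     wrong: callback would treat a struct dsi_data **
         *                 as a struct dsi_data * and read garbage         */
        return 0;
}
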
index 37f93022a1060fe47fb959addb87c7f48502912f..c0351abf83a35e7b0afe6cf36b5d6dafb7c112fc 100644 (file)
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <drm/drmP.h>
index 38b52e63b2b04f583296e700af528c4eacd2c080..27b9635124bc1d0e563e52fd8eb71acac50ae87a 100644 (file)
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #ifdef CONFIG_ROCKCHIP_RGB
index 0420f5c978b9d641926150f0d7987331b9fdb55d..cf45d0f940f9b3e5bc0c4d883a5c878ca50d6305 100644 (file)
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
                        return PTR_ERR(tcon->sclk0);
                }
        }
+       clk_prepare_enable(tcon->sclk0);
 
        if (tcon->quirks->has_channel_1) {
                tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
 
 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
 {
+       clk_disable_unprepare(tcon->sclk0);
        clk_disable_unprepare(tcon->clk);
 }
 
index 9d9e8146db90ca04d1e67194d9547dc95582f125..d7b409a3c0f8cfd5fcd20a7e628e5a8aafd2d3fe 100644 (file)
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
+
 #include "vkms_drv.h"
 #include <linux/crc32.h>
 #include <drm/drm_atomic.h>
index 177bbcb38306363b05d4dea04bedb83e617dc258..eb56ee893761fae168776aa1b4499bad0406a99f 100644 (file)
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_atomic_helper.h>
index 83087877565cf77b9104d67636c183231fdd19c4..7dcbecb5fac26b4c4b3216feea1bba239f74827f 100644 (file)
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 /**
  * DOC: vkms (Virtual Kernel Modesetting)
index e4469cd3d25491198209e3898189fc52e54e5ed7..81f1cfbeb9362e4edc9a39f1688060d1d23124c0 100644 (file)
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
 #ifndef _VKMS_DRV_H_
 #define _VKMS_DRV_H_
 
index 80311daed47a0ac752a3e1e68bd54f9d8b60d412..138b0bb325cf9662cd59b5a54158947dc691a2d9 100644 (file)
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include <linux/shmem_fs.h>
 
index 271a0eb9042c3ee7ba01a3c73329f883dbb13bd3..4173e4f483341307965aeed3c9a20376d8ae9f2f 100644 (file)
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_crtc_helper.h>
index 418817600ad11052105418572e6c2a6965bba71a..0e67d2d42f0cc4014fe88b263237fa347af07116 100644 (file)
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
 
 #include "vkms_drv.h"
 #include <drm/drm_plane_helper.h>
index 474b00e19697d90e7a60ee60fb8f8badd330f81e..0a7d4395d427bacf0e27cb5251ab74bb6e816ac1 100644 (file)
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
        .cpmem_ofs = 0x1f000000,
        .srm_ofs = 0x1f040000,
        .tpm_ofs = 0x1f060000,
-       .csi0_ofs = 0x1f030000,
-       .csi1_ofs = 0x1f038000,
+       .csi0_ofs = 0x1e030000,
+       .csi1_ofs = 0x1e038000,
        .ic_ofs = 0x1e020000,
        .disp0_ofs = 0x1e040000,
        .disp1_ofs = 0x1e048000,
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
        .cpmem_ofs = 0x07000000,
        .srm_ofs = 0x07040000,
        .tpm_ofs = 0x07060000,
-       .csi0_ofs = 0x07030000,
-       .csi1_ofs = 0x07038000,
+       .csi0_ofs = 0x06030000,
+       .csi1_ofs = 0x06038000,
        .ic_ofs = 0x06020000,
        .disp0_ofs = 0x06040000,
        .disp1_ofs = 0x06048000,
index 2f8db9d625514928006c6b31adeccbe312514ef3..4a28f3fbb0a28c55630be20edbb8c2df4de74645 100644 (file)
@@ -106,6 +106,7 @@ struct ipu_pre {
        void                    *buffer_virt;
        bool                    in_use;
        unsigned int            safe_window_end;
+       unsigned int            last_bufaddr;
 };
 
 static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
 
        writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+       pre->last_bufaddr = bufaddr;
 
        val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
              IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
        unsigned short current_yblock;
        u32 val;
 
+       if (bufaddr == pre->last_bufaddr)
+               return;
+
        writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+       pre->last_bufaddr = bufaddr;
 
        do {
                if (time_after(jiffies, timeout)) {
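
The ipu-v3 PRE hunks above cache the last buffer address written to IPU_PRE_NEXT_BUF and make ipu_pre_update() return early when asked to program the same address again, skipping the register write and the safe-window polling that follows. A minimal sketch of that write-only-on-change pattern, with the register write mocked by printf:

#include <stdint.h>
#include <stdio.h>

struct pre {
        uint32_t last_bufaddr;
};

/* Pretend register write; in the driver this is writel() to NEXT_BUF. */
static void write_next_buf(uint32_t bufaddr)
{
        printf("NEXT_BUF <- 0x%08x\n", (unsigned int)bufaddr);
}

static void pre_update(struct pre *pre, uint32_t bufaddr)
{
        if (bufaddr == pre->last_bufaddr)
                return;                 /* same frame: skip the update */

        write_next_buf(bufaddr);
        pre->last_bufaddr = bufaddr;
        /* ...the driver then polls for a safe window before returning. */
}

int main(void)
{
        struct pre pre = { 0 };

        pre_update(&pre, 0x10000000);   /* programs the register  */
        pre_update(&pre, 0x10000000);   /* duplicate: skipped     */
        pre_update(&pre, 0x10400000);   /* new address: programmed */
        return 0;
}
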
index c530476edba62b7804b892e47a675396dfe1c3c8..ac9fda1b5a7233c227cd5517dcf6331a29483a60 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/kfifo.h>
 #include <linux/sched/signal.h>
 #include <linux/export.h>
 #include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
 /* enqueue string to 'events' ring buffer */
 void hid_debug_event(struct hid_device *hdev, char *buf)
 {
-       unsigned i;
        struct hid_debug_list *list;
        unsigned long flags;
 
        spin_lock_irqsave(&hdev->debug_list_lock, flags);
-       list_for_each_entry(list, &hdev->debug_list, node) {
-               for (i = 0; buf[i]; i++)
-                       list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
-                               buf[i];
-               list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
-        }
+       list_for_each_entry(list, &hdev->debug_list, node)
+               kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
        spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
        wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
        hid_debug_event(hdev, buf);
 
        kfree(buf);
-        wake_up_interruptible(&hdev->debug_wait);
-
+       wake_up_interruptible(&hdev->debug_wait);
 }
 EXPORT_SYMBOL_GPL(hid_dump_input);
 
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
                goto out;
        }
 
-       if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
-               err = -ENOMEM;
+       err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+       if (err) {
                kfree(list);
                goto out;
        }
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
                size_t count, loff_t *ppos)
 {
        struct hid_debug_list *list = file->private_data;
-       int ret = 0, len;
+       int ret = 0, copied;
        DECLARE_WAITQUEUE(wait, current);
 
        mutex_lock(&list->read_mutex);
-       while (ret == 0) {
-               if (list->head == list->tail) {
-                       add_wait_queue(&list->hdev->debug_wait, &wait);
-                       set_current_state(TASK_INTERRUPTIBLE);
-
-                       while (list->head == list->tail) {
-                               if (file->f_flags & O_NONBLOCK) {
-                                       ret = -EAGAIN;
-                                       break;
-                               }
-                               if (signal_pending(current)) {
-                                       ret = -ERESTARTSYS;
-                                       break;
-                               }
+       if (kfifo_is_empty(&list->hid_debug_fifo)) {
+               add_wait_queue(&list->hdev->debug_wait, &wait);
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               while (kfifo_is_empty(&list->hid_debug_fifo)) {
+                       if (file->f_flags & O_NONBLOCK) {
+                               ret = -EAGAIN;
+                               break;
+                       }
 
-                               if (!list->hdev || !list->hdev->debug) {
-                                       ret = -EIO;
-                                       set_current_state(TASK_RUNNING);
-                                       goto out;
-                               }
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
 
-                               /* allow O_NONBLOCK from other threads */
-                               mutex_unlock(&list->read_mutex);
-                               schedule();
-                               mutex_lock(&list->read_mutex);
-                               set_current_state(TASK_INTERRUPTIBLE);
+                       /* if list->hdev is NULL we cannot remove_wait_queue().
+                        * if list->hdev->debug is 0 then hid_debug_unregister()
+                        * was already called and list->hdev is being destroyed.
+                        * if we add remove_wait_queue() here we can hit a race.
+                        */
+                       if (!list->hdev || !list->hdev->debug) {
+                               ret = -EIO;
+                               set_current_state(TASK_RUNNING);
+                               goto out;
                        }
 
-                       set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&list->hdev->debug_wait, &wait);
+                       /* allow O_NONBLOCK from other threads */
+                       mutex_unlock(&list->read_mutex);
+                       schedule();
+                       mutex_lock(&list->read_mutex);
+                       set_current_state(TASK_INTERRUPTIBLE);
                }
 
-               if (ret)
-                       goto out;
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&list->hdev->debug_wait, &wait);
 
-               /* pass the ringbuffer contents to userspace */
-copy_rest:
-               if (list->tail == list->head)
+               if (ret)
                        goto out;
-               if (list->tail > list->head) {
-                       len = list->tail - list->head;
-                       if (len > count)
-                               len = count;
-
-                       if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       ret += len;
-                       list->head += len;
-               } else {
-                       len = HID_DEBUG_BUFSIZE - list->head;
-                       if (len > count)
-                               len = count;
-
-                       if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
-                               ret = -EFAULT;
-                               goto out;
-                       }
-                       list->head = 0;
-                       ret += len;
-                       count -= len;
-                       if (count > 0)
-                               goto copy_rest;
-               }
-
        }
+
+       /* pass the fifo content to userspace, locking is not needed with only
+        * one concurrent reader and one concurrent writer
+        */
+       ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
+       if (ret)
+               goto out;
+       ret = copied;
 out:
        mutex_unlock(&list->read_mutex);
        return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
        struct hid_debug_list *list = file->private_data;
 
        poll_wait(file, &list->hdev->debug_wait, wait);
-       if (list->head != list->tail)
+       if (!kfifo_is_empty(&list->hid_debug_fifo))
                return EPOLLIN | EPOLLRDNORM;
        if (!list->hdev->debug)
                return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
        spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_del(&list->node);
        spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
-       kfree(list->hid_debug_buf);
+       kfifo_free(&list->hid_debug_fifo);
        kfree(list);
 
        return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
 {
        debugfs_remove_recursive(hid_debug_root);
 }
-
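
The hid-debug rework above replaces the hand-rolled head/tail ring buffer with a kfifo: the writer becomes one kfifo_in() of strlen(buf) bytes under the existing spinlock, and the reader one kfifo_to_user(), with the in-diff comment noting that no extra locking is needed for a single reader and single writer. For comparison, the bookkeeping the old code had to carry can be seen in a tiny standalone ring buffer (deliberately simplified: overflow is not handled here, which is part of what kfifo guards against):

#include <stdio.h>
#include <string.h>

#define BUFSIZE 16      /* deliberately tiny; overflow is ignored in this model */

struct ring {
        char buf[BUFSIZE];
        unsigned int head;      /* next byte to read  */
        unsigned int tail;      /* next byte to write */
};

/* Equivalent of the old hid_debug_event() inner loop. */
static void ring_put(struct ring *r, const char *s)
{
        for (size_t i = 0; s[i]; i++) {
                r->buf[r->tail] = s[i];
                r->tail = (r->tail + 1) % BUFSIZE;
        }
}

/* Equivalent of the old copy-to-user path, minus the wraparound split. */
static size_t ring_get(struct ring *r, char *out, size_t count)
{
        size_t n = 0;

        while (n < count && r->head != r->tail) {
                out[n++] = r->buf[r->head];
                r->head = (r->head + 1) % BUFSIZE;
        }
        return n;
}

int main(void)
{
        struct ring r = { .head = 0, .tail = 0 };
        char out[BUFSIZE + 1];
        size_t n;

        ring_put(&r, "event: 0x01\n");
        n = ring_get(&r, out, sizeof(out) - 1);
        out[n] = '\0';
        fputs(out, stdout);
        return 0;
}
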
index b1086bfb04656aa45422ff1f89a57647f53da369..cd9c65f3d404ff92f0eef87d4d99236b370e82cc 100644 (file)
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int omap_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
 {
        struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int omap_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
 {
        struct omap_i2c_dev *omap = dev_get_drvdata(dev);
 
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops omap_i2c_pm_ops = {
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                     pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
                           omap_i2c_runtime_resume, NULL)
 };
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif /* CONFIG_PM */
 
 static struct platform_driver omap_i2c_driver = {
        .probe          = omap_i2c_probe,
        .remove         = omap_i2c_remove,
        .driver         = {
                .name   = "omap_i2c",
-               .pm     = OMAP_I2C_PM_OPS,
+               .pm     = &omap_i2c_pm_ops,
                .of_match_table = of_match_ptr(omap_i2c_of_match),
        },
 };
index 031d568b4972fa59e6426b3dfe013b0c3b94ad9e..4e339cfd0c546db8751f7d547ce14d2ce33bb5ed 100644 (file)
 #include <linux/iio/machine.h>
 #include <linux/iio/driver.h>
 
-#define AXP288_ADC_EN_MASK             0xF1
-#define AXP288_ADC_TS_PIN_GPADC                0xF2
-#define AXP288_ADC_TS_PIN_ON           0xF3
+/*
+ * This mask enables all ADCs except for the battery temp-sensor (TS), that is
+ * left as-is to avoid breaking charging on devices without a temp-sensor.
+ */
+#define AXP288_ADC_EN_MASK                             0xF0
+#define AXP288_ADC_TS_ENABLE                           0x01
+
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK              GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF                      (0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING         (1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND              (2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON                       (3 << 0)
 
 enum axp288_adc_id {
        AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
 struct axp288_adc_info {
        int irq;
        struct regmap *regmap;
+       bool ts_enabled;
 };
 
 static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
        return IIO_VAL_INT;
 }
 
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
-                               unsigned long address)
+/*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporarily switch the TS current-source to ondemand, so that
+ * the GPADC can use it, otherwise we will always read an all 0 value.
+ */
+static int axp288_adc_set_ts(struct axp288_adc_info *info,
+                            unsigned int mode, unsigned long address)
 {
        int ret;
 
-       /* channels other than GPADC do not need to switch TS pin */
+       /* No need to switch the current-source if the TS pin is disabled */
+       if (!info->ts_enabled)
+               return 0;
+
+       /* Channels other than GPADC do not need the current source */
        if (address != AXP288_GP_ADC_H)
                return 0;
 
-       ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+       ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
        if (ret)
                return ret;
 
        /* When switching to the GPADC pin give things some time to settle */
-       if (mode == AXP288_ADC_TS_PIN_GPADC)
+       if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
                usleep_range(6000, 10000);
 
        return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        mutex_lock(&indio_dev->mlock);
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+               if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
                                        chan->address)) {
                        dev_err(&indio_dev->dev, "GPADC mode\n");
                        ret = -EINVAL;
                        break;
                }
                ret = axp288_adc_read_channel(val, chan->address, info->regmap);
-               if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+               if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
                                                chan->address))
                        dev_err(&indio_dev->dev, "TS pin restore\n");
                break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
-static int axp288_adc_set_state(struct regmap *regmap)
+static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
-       /* ADC should be always enabled for internal FG to function */
-       if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
-               return -EIO;
+       int ret, adc_enable_val;
+
+       /*
+        * Determine if the TS pin is enabled and set the TS current-source
+        * accordingly.
+        */
+       ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
+       if (ret)
+               return ret;
+
+       if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
+               info->ts_enabled = true;
+               ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                        AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+                                        AXP288_ADC_TS_CURRENT_ON);
+       } else {
+               info->ts_enabled = false;
+               ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                        AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+                                        AXP288_ADC_TS_CURRENT_OFF);
+       }
+       if (ret)
+               return ret;
 
-       return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+       /* Turn on the ADC for all channels except TS, leave TS as is */
+       return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
+                                 AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
 }
 
 static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
         * Set ADC to enabled state at all time, including system suspend.
         * otherwise internal fuel gauge functionality may be affected.
         */
-       ret = axp288_adc_set_state(axp20x->regmap);
+       ret = axp288_adc_initialize(info);
        if (ret) {
                dev_err(&pdev->dev, "unable to enable ADC device\n");
                return ret;
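
The axp288_adc changes above make the shared TS current-source handling conditional: probe records in ts_enabled whether the TS input is enabled at all, the per-read switch to on-demand mode happens only for the GPADC channel and only when TS is in use, and the enable mask no longer touches the TS bit. A compact sketch of the per-read decision, with the regmap write mocked and the two mode constants (3 and 2) taken from the defines in the hunk, names shortened:

#include <stdbool.h>
#include <stdio.h>

#define TS_CURRENT_ON           3
#define TS_CURRENT_ON_ONDEMAND  2

struct adc_info {
        bool ts_enabled;        /* probed once from the ADC enable register */
};

/* Pretend register update of the TS pin control register. */
static int set_ts_current(unsigned int mode)
{
        printf("TS current-source mode <- %u\n", mode);
        return 0;
}

/* Mirrors axp288_adc_set_ts(): only the GPADC channel, and only when
 * the TS pin is in use, needs the current source switched. */
static int adc_set_ts(struct adc_info *info, unsigned int mode, bool is_gpadc)
{
        if (!info->ts_enabled)
                return 0;
        if (!is_gpadc)
                return 0;
        return set_ts_current(mode);
}

int main(void)
{
        struct adc_info info = { .ts_enabled = true };

        adc_set_ts(&info, TS_CURRENT_ON_ONDEMAND, true);  /* before GPADC read  */
        adc_set_ts(&info, TS_CURRENT_ON, true);           /* restore afterwards */
        adc_set_ts(&info, TS_CURRENT_ON, false);          /* other channels: no-op */
        return 0;
}
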
index 184d686ebd9958ec316a53b16e1cab667b4518fa..8b4568edd5cb6e93a7567523999313c3d64c781b 100644 (file)
@@ -41,6 +41,7 @@
 
 #define ADS8688_VREF_MV                        4096
 #define ADS8688_REALBITS               16
+#define ADS8688_MAX_CHANNELS           8
 
 /*
  * enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
 {
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
-       u16 buffer[8];
+       u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
        int i, j = 0;
 
        for (i = 0; i < indio_dev->masklength; i++) {
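
The ads8688 fix above grows the on-stack scan buffer from 8 u16 samples to 8 plus sizeof(s64)/sizeof(u16) = 4 extra slots, so the 64-bit timestamp appended by iio_push_to_buffers_with_timestamp() has room at the end instead of overrunning the array. A tiny standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MAX_CHANNELS 8  /* same count as ADS8688_MAX_CHANNELS above */

int main(void)
{
        /* 8 x 16-bit samples, then an 8-byte timestamp slot at the end. */
        size_t sample_bytes = MAX_CHANNELS * sizeof(uint16_t);
        size_t total_bytes  = sample_bytes + sizeof(int64_t);
        size_t u16_slots    = MAX_CHANNELS + sizeof(int64_t) / sizeof(uint16_t);

        printf("samples   : %zu bytes\n", sample_bytes);              /* 16 */
        printf("with ts   : %zu bytes\n", total_bytes);               /* 24 */
        printf("u16 slots : %zu (was 8 before the fix)\n", u16_slots);/* 12 */
        return 0;
}
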
index a406ad31b096f6755121fede4b7a6e2de3a24342..3a20cb5d9bffc29b6869954a6bbef94ecdcd42b5 100644 (file)
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                switch (chan->type) {
                case IIO_TEMP:
-                       *val = 1; /* 0.01 */
-                       *val2 = 100;
-                       break;
+                       *val = 10;
+                       return IIO_VAL_INT;
                case IIO_PH:
                        *val = 1; /* 0.001 */
                        *val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
                           int val, int val2, long mask)
 {
        struct atlas_data *data = iio_priv(indio_dev);
-       __be32 reg = cpu_to_be32(val);
+       __be32 reg = cpu_to_be32(val / 10);
 
        if (val2 != 0 || val < 0 || val > 20000)
                return -EINVAL;
index 1457f931218e0bede0c3c34660aaf870afaaa5a7..78188bf7e90d7a42f6dbc263f2e412c77de92e0c 100644 (file)
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_sm = 1;
+static int intel_iommu_sm;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str)
                } else if (!strncmp(str, "sp_off", 6)) {
                        pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
-               } else if (!strncmp(str, "sm_off", 6)) {
-                       pr_info("Intel-IOMMU: disable scalable mode support\n");
-                       intel_iommu_sm = 0;
+               } else if (!strncmp(str, "sm_on", 5)) {
+                       pr_info("Intel-IOMMU: scalable mode supported\n");
+                       intel_iommu_sm = 1;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
index 7f2a45445b00e04aecb29b94b694cb4d51c9593a..c3aba3fc818d3c190166e07c8179dee5832b46bc 100644 (file)
@@ -97,9 +97,14 @@ struct its_device;
  * The ITS structure - contains most of the infrastructure, with the
  * top-level MSI domain, the command queue, the collections, and the
  * list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
  */
 struct its_node {
        raw_spinlock_t          lock;
+       struct mutex            dev_alloc_lock;
        struct list_head        entry;
        void __iomem            *base;
        phys_addr_t             phys_base;
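The comment added above describes a two-level locking scheme: the new mutex serializes device allocation, while the raw spinlock only covers walks of the device list. A rough sketch of the intended nesting, reusing the field names from the structure above but with an illustrative lookup function (and assuming the usual list_head entry member), not the driver's own code:

static struct its_device *example_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev, *found = NULL;
	unsigned long flags;

	/* Taken first: covers allocation/teardown of its_device entries. */
	mutex_lock(&its->dev_alloc_lock);

	/* Taken second, and only around the list walk itself. */
	raw_spin_lock_irqsave(&its->lock, flags);
	list_for_each_entry(its_dev, &its->its_device_list, entry) {
		if (its_dev->device_id == dev_id) {
			found = its_dev;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Any allocation or freeing would happen here, still under the mutex. */
	mutex_unlock(&its->dev_alloc_lock);

	return found;
}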
@@ -156,6 +161,7 @@ struct its_device {
        void                    *itt;
        u32                     nr_ites;
        u32                     device_id;
+       bool                    shared;
 };
 
 static struct {
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
                nr_irqs /= 2;
        } while (nr_irqs > 0);
 
+       if (!nr_irqs)
+               err = -ENOSPC;
+
        if (err)
                goto out;
 
@@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void)
        return 0;
 }
 
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+       u32 count = 1000000;    /* 1s! */
+       bool clean;
+       u64 val;
+
+       val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+       val &= ~GICR_VPENDBASER_Valid;
+       gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       do {
+               val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+               clean = !(val & GICR_VPENDBASER_Dirty);
+               if (!clean) {
+                       count--;
+                       cpu_relax();
+                       udelay(1);
+               }
+       } while (!clean && count);
+
+       return val;
+}
+
 static void its_cpu_init_lpis(void)
 {
        void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void)
        val |= GICR_CTLR_ENABLE_LPIS;
        writel_relaxed(val, rbase + GICR_CTLR);
 
+       if (gic_rdists->has_vlpis) {
+               void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+               /*
+                * It's possible for a CPU to receive VLPIs before it is
+                * scheduled as a vPE, especially for the first CPU, and a
+                * VLPI with an INTID larger than 2^(IDbits+1) will be
+                * considered out of range and dropped by the GIC.
+                * So we initialize IDbits to a known value to avoid dropping VLPIs.
+                */
+               val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+               pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+                       smp_processor_id(), val);
+               gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+               /*
+                * Also clear the Valid bit of GICR_VPENDBASER, in case any
+                * stale programming was left behind and could end up
+                * corrupting memory.
+                */
+               val = its_clear_vpend_valid(vlpi_base);
+               WARN_ON(val & GICR_VPENDBASER_Dirty);
+       }
+
        /* Make sure the GIC has seen the above */
        dsb(sy);
 out:
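For reference, the bounded loop in its_clear_vpend_valid() is the classic "clear a bit, wait for the hardware to acknowledge" pattern. A hypothetical equivalent written with the generic helper from <linux/iopoll.h> (not what the driver does; the same GIC register accessors are assumed to be available) might look like:

#include <linux/iopoll.h>

static u64 example_clear_vpend_valid(void __iomem *vlpi_base)
{
	u64 val;

	/* Drop the Valid bit... */
	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
	val &= ~GICR_VPENDBASER_Valid;
	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

	/* ...then poll (1us steps, ~1s budget) until Dirty clears. */
	readx_poll_timeout_atomic(gits_read_vpendbaser,
				  vlpi_base + GICR_VPENDBASER, val,
				  !(val & GICR_VPENDBASER_Dirty), 1, 1000000);

	return val;
}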
@@ -2422,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
        struct its_device *its_dev;
        struct msi_domain_info *msi_info;
        u32 dev_id;
+       int err = 0;
 
        /*
         * We ignore "dev" entirely, and rely on the dev_id that has
@@ -2444,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
                return -EINVAL;
        }
 
+       mutex_lock(&its->dev_alloc_lock);
        its_dev = its_find_device(its, dev_id);
        if (its_dev) {
                /*
@@ -2451,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
                 * another alias (PCI bridge of some sort). No need to
                 * create the device.
                 */
+               its_dev->shared = true;
                pr_debug("Reusing ITT for devID %x\n", dev_id);
                goto out;
        }
 
        its_dev = its_create_device(its, dev_id, nvec, true);
-       if (!its_dev)
-               return -ENOMEM;
+       if (!its_dev) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
 out:
+       mutex_unlock(&its->dev_alloc_lock);
        info->scratchpad[0].ptr = its_dev;
-       return 0;
+       return err;
 }
 
 static struct msi_domain_ops its_msi_domain_ops = {
@@ -2566,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 {
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       struct its_node *its = its_dev->its;
        int i;
 
        for (i = 0; i < nr_irqs; i++) {
@@ -2580,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                irq_domain_reset_irq_data(data);
        }
 
-       /* If all interrupts have been freed, start mopping the floor */
-       if (bitmap_empty(its_dev->event_map.lpi_map,
+       mutex_lock(&its->dev_alloc_lock);
+
+       /*
+        * If all interrupts have been freed, start mopping the
+        * floor. This is conditioned on the device not being shared.
+        */
+       if (!its_dev->shared &&
+           bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
                its_lpi_free(its_dev->event_map.lpi_map,
                             its_dev->event_map.lpi_base,
@@ -2593,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                its_free_device(its_dev);
        }
 
+       mutex_unlock(&its->dev_alloc_lock);
+
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
@@ -2755,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 static void its_vpe_deschedule(struct its_vpe *vpe)
 {
        void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
-       u32 count = 1000000;    /* 1s! */
-       bool clean;
        u64 val;
 
-       /* We're being scheduled out */
-       val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-       val &= ~GICR_VPENDBASER_Valid;
-       gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-       do {
-               val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-               clean = !(val & GICR_VPENDBASER_Dirty);
-               if (!clean) {
-                       count--;
-                       cpu_relax();
-                       udelay(1);
-               }
-       } while (!clean && count);
+       val = its_clear_vpend_valid(vlpi_base);
 
-       if (unlikely(!clean && !count)) {
+       if (unlikely(val & GICR_VPENDBASER_Dirty)) {
                pr_err_ratelimited("ITS virtual pending table not cleaning\n");
                vpe->idai = false;
                vpe->pending_last = true;
@@ -3517,6 +3573,7 @@ static int __init its_probe_one(struct resource *res,
        }
 
        raw_spin_lock_init(&its->lock);
+       mutex_init(&its->dev_alloc_lock);
        INIT_LIST_HEAD(&its->entry);
        INIT_LIST_HEAD(&its->its_device_list);
        typer = gic_read_typer(its_base + GITS_TYPER);
index 25f32e1d77647d0be5d8a59d3d214127ba05b9ac..3496b61a312aef87cc9668189fd9047a844e8ca3 100644 (file)
@@ -34,6 +34,9 @@
 #define SEL_INT_PENDING                (1 << 6)
 #define SEL_INT_NUM_MASK       0x3f
 
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ     (1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ     (1 << 6)
+
 struct icu_chip_data {
        int                     nr_irqs;
        unsigned int            virq_base;
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
 static const struct mmp_intc_conf mmp2_conf = {
        .conf_enable    = 0x20,
        .conf_disable   = 0x0,
-       .conf_mask      = 0x7f,
+       .conf_mask      = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+                         MMP2_ICU_INT_ROUTE_PJ4_FIQ,
 };
 
 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
index 211ed6cffd10e1be817884a7724f3f37687afb93..57897871188706339dc9dfd140a9b55426b478af 100644 (file)
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
        spin_lock_irqsave(&timer->dev->lock, flags);
        if (timer->id >= 0)
                list_move_tail(&timer->list, &timer->dev->expired);
-       spin_unlock_irqrestore(&timer->dev->lock, flags);
        wake_up_interruptible(&timer->dev->wait);
+       spin_unlock_irqrestore(&timer->dev->lock, flags);
 }
 
 static int
index 4eb5f8c5653553a448d2b0971a3dca04ca0ffe4b..a20531e5f3b4c6936f75d0b647b8b05267a6ca03 100644 (file)
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
 static void rq_completed(struct mapped_device *md)
 {
        /* nudge anyone waiting on suspend queue */
-       if (unlikely(waitqueue_active(&md->wait)))
+       if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);
 
        /*
index 2b53c3841b530b591c0c8ed688c0ea2b94b5f273..515e6af9bed2c7ab5e94e356215020422316a3ce 100644 (file)
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
                                    true, duration, &io->stats_aux);
 
        /* nudge anyone waiting on suspend queue */
-       if (unlikely(waitqueue_active(&md->wait)))
+       if (unlikely(wq_has_sleeper(&md->wait)))
                wake_up(&md->wait);
 }
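Both hunks above replace waitqueue_active() with wq_has_sleeper(), which inserts the full memory barrier needed to pair with the sleeper's prepare_to_wait(). A minimal waker-side sketch of that convention, with hypothetical structure and field names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_dev {
	atomic_t pending;
	wait_queue_head_t wait;
};

static void example_request_done(struct example_dev *md)
{
	/* Publish the state the sleeper will re-check... */
	atomic_dec(&md->pending);

	/*
	 * ...then look at the queue. wq_has_sleeper() issues smp_mb()
	 * before testing the wait list, so the store above cannot be
	 * reordered past the emptiness check; a bare waitqueue_active()
	 * gives no such ordering.
	 */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);
}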
 
@@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
                        return r;
        }
 
-       bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
+       bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+       clone->bi_iter.bi_size = to_bytes(len);
+
+       if (bio_integrity(bio))
+               bio_integrity_trim(clone);
 
        return 0;
 }
index 1fc8ea0f519bb28d2c4e2640cda8caeddba55a75..ca4c9cc218a22b9644957f04efa0d28142aab41c 100644 (file)
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head,
        struct mei_cl_cb *cb, *next;
 
        list_for_each_entry_safe(cb, next, head, list) {
-               if (cl == cb->cl)
+               if (cl == cb->cl) {
                        list_del_init(&cb->list);
+                       if (cb->fop_type == MEI_FOP_READ)
+                               mei_io_cb_free(cb);
+               }
        }
 }
 
index 23739a60517f8efc7fcf57136c6a759b9ca2d401..bb1ee9834a029d28fdd690ed660f6f6355c89b67 100644 (file)
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
+
 /*
  * MEI HW Section
  */
index e89497f858ae352bc773838a823c2a2113f7ccc5..3ab946ad32570ce645d18950d8be8d8582f0484a 100644 (file)
@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
        /* required last entry */
        {0, }
 };
index 2bfa3a903bf9a574f1d573b94e36ea4f6bc86273..744757f541be40f4ea4349579ff534ae88b06e53 100644 (file)
@@ -47,7 +47,8 @@
  * @dc: Virtio device control
  * @vpdev: VOP device which is the parent for this virtio device
  * @vr: Buffer for accessing the VRING
- * @used: Buffer for used
+ * @used_virt: Virtual address of used ring
+ * @used: DMA address of used ring
  * @used_size: Size of the used buffer
  * @reset_done: Track whether VOP reset is complete
  * @virtio_cookie: Cookie returned upon requesting an interrupt
@@ -61,6 +62,7 @@ struct _vop_vdev {
        struct mic_device_ctrl __iomem *dc;
        struct vop_device *vpdev;
        void __iomem *vr[VOP_MAX_VRINGS];
+       void *used_virt[VOP_MAX_VRINGS];
        dma_addr_t used[VOP_MAX_VRINGS];
        int used_size[VOP_MAX_VRINGS];
        struct completion reset_done;
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq)
 static void vop_del_vq(struct virtqueue *vq, int n)
 {
        struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
-       struct vring *vr = (struct vring *)(vq + 1);
        struct vop_device *vpdev = vdev->vpdev;
 
        dma_unmap_single(&vpdev->dev, vdev->used[n],
                         vdev->used_size[n], DMA_BIDIRECTIONAL);
-       free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+       free_pages((unsigned long)vdev->used_virt[n],
+                  get_order(vdev->used_size[n]));
        vring_del_virtqueue(vq);
        vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
        vdev->vr[n] = NULL;
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev)
                vop_del_vq(vq, idx++);
 }
 
+static struct virtqueue *vop_new_virtqueue(unsigned int index,
+                                     unsigned int num,
+                                     struct virtio_device *vdev,
+                                     bool context,
+                                     void *pages,
+                                     bool (*notify)(struct virtqueue *vq),
+                                     void (*callback)(struct virtqueue *vq),
+                                     const char *name,
+                                     void *used)
+{
+       bool weak_barriers = false;
+       struct vring vring;
+
+       vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
+       vring.used = used;
+
+       return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
+                                    notify, callback, name);
+}
+
 /*
  * This routine will assign vring's allocated in host/io memory. Code in
  * virtio_ring.c however continues to access this io memory as if it were local
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
        struct _mic_vring_info __iomem *info;
        void *used;
        int vr_size, _vr_size, err, magic;
-       struct vring *vr;
        u8 type = ioread8(&vdev->desc->type);
 
        if (index >= ioread8(&vdev->desc->num_vq))
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                return ERR_PTR(-ENOMEM);
        vdev->vr[index] = va;
        memset_io(va, 0x0, _vr_size);
-       vq = vring_new_virtqueue(
-                               index,
-                               le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
-                               dev,
-                               false,
-                               ctx,
-                               (void __force *)va, vop_notify, callback, name);
-       if (!vq) {
-               err = -ENOMEM;
-               goto unmap;
-       }
+
        info = va + _vr_size;
        magic = ioread32(&info->magic);
 
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                goto unmap;
        }
 
-       /* Allocate and reassign used ring now */
        vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
                                             sizeof(struct vring_used_elem) *
                                             le16_to_cpu(config.num));
        used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(vdev->used_size[index]));
+       vdev->used_virt[index] = used;
        if (!used) {
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
-               goto del_vq;
+               goto unmap;
+       }
+
+       vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
+                              (void __force *)va, vop_notify, callback,
+                              name, used);
+       if (!vq) {
+               err = -ENOMEM;
+               goto free_used;
        }
+
        vdev->used[index] = dma_map_single(&vpdev->dev, used,
                                            vdev->used_size[index],
                                            DMA_BIDIRECTIONAL);
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
                err = -ENOMEM;
                dev_err(_vop_dev(vdev), "%s %d err %d\n",
                        __func__, __LINE__, err);
-               goto free_used;
+               goto del_vq;
        }
        writeq(vdev->used[index], &vqconfig->used_address);
-       /*
-        * To reassign the used ring here we are directly accessing
-        * struct vring_virtqueue which is a private data structure
-        * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
-        * vring_new_virtqueue() would ensure that
-        *  (&vq->vring == (struct vring *) (&vq->vq + 1));
-        */
-       vr = (struct vring *)(vq + 1);
-       vr->used = used;
 
        vq->priv = vdev;
        return vq;
+del_vq:
+       vring_del_virtqueue(vq);
 free_used:
        free_pages((unsigned long)used,
                   get_order(vdev->used_size[index]));
-del_vq:
-       vring_del_virtqueue(vq);
 unmap:
        vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
        return ERR_PTR(err);
@@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
        int ret = -1;
 
        if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+               struct device *dev = get_device(&vdev->vdev.dev);
+
                dev_dbg(&vpdev->dev,
                        "%s %d config_change %d type %d vdev %p\n",
                        __func__, __LINE__,
@@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
                iowrite8(-1, &dc->h2c_vdev_db);
                if (status & VIRTIO_CONFIG_S_DRIVER_OK)
                        wait_for_completion(&vdev->reset_done);
-               put_device(&vdev->vdev.dev);
+               put_device(dev);
                iowrite8(1, &dc->guest_ack);
                dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
                        __func__, __LINE__, ioread8(&dc->guest_ack));
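The two hunks above pin the struct device with get_device() before the reset handshake and only release that reference after the wait, so the final put elsewhere cannot free the object mid-wait. A generic sketch of the pairing, where example_unregister() stands in for whatever teardown may drop the original reference:

static void example_remove(struct _vop_vdev *vdev)
{
	/* Pin the device across the handshake. */
	struct device *dev = get_device(&vdev->vdev.dev);

	example_unregister(vdev);		/* may drop the last original reference */
	wait_for_completion(&vdev->reset_done);	/* safe: 'dev' still holds a reference */

	/* Only now may the device really go away. */
	put_device(dev);
}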
index 60104e1079c5357f4919e0d7f684efc003ab159f..37f174ccbcec4b1f10d0af717707a38fa46d4499 100644 (file)
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
                /* let's register it anyway to preserve ordering */
                slave->offset = 0;
                slave->mtd.size = 0;
+
+               /* Initialize ->erasesize to make add_mtd_device() happy. */
+               slave->mtd.erasesize = parent->erasesize;
+
                printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
                        part->name);
                goto out_register;
@@ -632,7 +636,6 @@ err_remove_part:
        mutex_unlock(&mtd_partitions_mutex);
 
        free_partition(new);
-       pr_info("%s:%i\n", __func__, __LINE__);
 
        return ret;
 }
index bd4cfac6b5aa69e8dbc6cec34c02ca59a8399912..a4768df5083f9633a965bf85f12dce4b1d6ad35c 100644 (file)
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
 
        /*
         * Reset BCH here, too. We got failures otherwise :(
-        * See later BCH reset for explanation of MX23 handling
+        * See later BCH reset for explanation of MX23 and MX28 handling
         */
-       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+       ret = gpmi_reset_block(r->bch_regs,
+                              GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
        if (ret)
                goto err_out;
 
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        /*
        * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
        * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-       * On the other hand, the MX28 needs the reset, because one case has been
-       * seen where the BCH produced ECC errors constantly after 10000
-       * consecutive reboots. The latter case has not been seen on the MX23
-       * yet, still we don't know if it could happen there as well.
+       * We skip it on the MX28 as well.
        */
-       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+       ret = gpmi_reset_block(r->bch_regs,
+                              GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
        if (ret)
                goto err_out;
 
index cca4b24d2ffa8e13151b61721053101f2a55644e..839494ac457c15c4697a357f2136f0cf0852acc6 100644 (file)
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip)
 
 /**
  * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: NAND chip object
  * @oob: oob data buffer
  * @len: oob data write length
  * @ops: oob ops structure
index 1b722fe9213c31ca56fedd21736eb1e7b678c9f2..19a2b563acdfe63644572434efed785b78cc7692 100644 (file)
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
 
 /**
  * read_bbt - [GENERIC] Read the bad block table starting from page
- * @chip: NAND chip object
+ * @this: NAND chip object
  * @buf: temporary buffer
  * @page: the starting page
  * @num: the number of bbt descriptors to read
index 479c2f2cf17f91ee44978b221391f91a928c0b95..fa87ae28cdfecd21fb6fb435fac35047a3b37325 100644 (file)
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
        struct nand_device *nand = spinand_to_nand(spinand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        struct nand_page_io_req adjreq = *req;
-       unsigned int nbytes = 0;
-       void *buf = NULL;
+       void *buf = spinand->databuf;
+       unsigned int nbytes;
        u16 column = 0;
        int ret;
 
-       memset(spinand->databuf, 0xff,
-              nanddev_page_size(nand) +
-              nanddev_per_page_oobsize(nand));
+       /*
+        * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+        * the cache content to 0xFF (depends on vendor implementation), so we
+        * must fill the page cache entirely even if we only want to program
+        * the data portion of the page, otherwise we might corrupt the BBM or
+        * user data previously programmed in OOB area.
+        */
+       nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+       memset(spinand->databuf, 0xff, nbytes);
+       adjreq.dataoffs = 0;
+       adjreq.datalen = nanddev_page_size(nand);
+       adjreq.databuf.out = spinand->databuf;
+       adjreq.ooblen = nanddev_per_page_oobsize(nand);
+       adjreq.ooboffs = 0;
+       adjreq.oobbuf.out = spinand->oobbuf;
 
-       if (req->datalen) {
+       if (req->datalen)
                memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
                       req->datalen);
-               adjreq.dataoffs = 0;
-               adjreq.datalen = nanddev_page_size(nand);
-               adjreq.databuf.out = spinand->databuf;
-               nbytes = adjreq.datalen;
-               buf = spinand->databuf;
-       }
 
        if (req->ooblen) {
                if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
                else
                        memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
                               req->ooblen);
-
-               adjreq.ooblen = nanddev_per_page_oobsize(nand);
-               adjreq.ooboffs = 0;
-               nbytes += nanddev_per_page_oobsize(nand);
-               if (!buf) {
-                       buf = spinand->oobbuf;
-                       column = nanddev_page_size(nand);
-               }
        }
 
        spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
 
                /*
                 * We need to use the RANDOM LOAD CACHE operation if there's
-                * more than one iteration, because the LOAD operation resets
-                * the cache to 0xff.
+                * more than one iteration, because the LOAD operation might
+                * reset the cache to 0xff.
                 */
                if (nbytes) {
                        column = op.addr.val;
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand)
        for (i = 0; i < nand->memorg.ntargets; i++) {
                ret = spinand_select_target(spinand, i);
                if (ret)
-                       goto err_free_bufs;
+                       goto err_manuf_cleanup;
 
                ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
                if (ret)
-                       goto err_free_bufs;
+                       goto err_manuf_cleanup;
        }
 
        ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
index 90f51425298709ff47b09cb7de4e1b80acd14e7f..d9c56a779c088e3b68913615ffb000e742026caa 100644 (file)
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
        /* Clear all pending interrupts */
        writel(0xffffffff, priv->regs + B53_SRAB_INTR);
 
-       if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
-               return;
-
        for (i = 0; i < B53_N_PORTS; i++) {
                port = &priv->port_intrs[i];
 
index 5200e4bdce93d19f3a3938f3fabbe74743b1352d..ea243840ee0fe62e28c5ccc6599533d406e22de8 100644 (file)
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
 {
        struct mv88e6xxx_chip *chip = dev_id;
        struct mv88e6xxx_atu_entry entry;
+       int spid;
        int err;
        u16 val;
 
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
        if (err)
                goto out;
 
+       spid = entry.state;
+
        if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
                dev_err_ratelimited(chip->dev,
                                    "ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
 
        if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
                dev_err_ratelimited(chip->dev,
-                                   "ATU member violation for %pM portvec %x\n",
-                                   entry.mac, entry.portvec);
-               chip->ports[entry.portvec].atu_member_violation++;
+                                   "ATU member violation for %pM portvec %x spid %d\n",
+                                   entry.mac, entry.portvec, spid);
+               chip->ports[spid].atu_member_violation++;
        }
 
        if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
                dev_err_ratelimited(chip->dev,
-                                   "ATU miss violation for %pM portvec %x\n",
-                                   entry.mac, entry.portvec);
-               chip->ports[entry.portvec].atu_miss_violation++;
+                                   "ATU miss violation for %pM portvec %x spid %d\n",
+                                   entry.mac, entry.portvec, spid);
+               chip->ports[spid].atu_miss_violation++;
        }
 
        if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
                dev_err_ratelimited(chip->dev,
-                                   "ATU full violation for %pM portvec %x\n",
-                                   entry.mac, entry.portvec);
-               chip->ports[entry.portvec].atu_full_violation++;
+                                   "ATU full violation for %pM portvec %x spid %d\n",
+                                   entry.mac, entry.portvec, spid);
+               chip->ports[spid].atu_full_violation++;
        }
        mutex_unlock(&chip->reg_lock);
 
index f9521d0274b75fdaa52a72a5850ae2e285c03c25..28c9b0bdf2f6cff3fc5cfeed3d8ef0beed1dfb60 100644 (file)
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
                                struct ethtool_wolinfo *wol)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
-       u32 reg;
 
        wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
        wol->wolopts = priv->wolopts;
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
        if (!(priv->wolopts & WAKE_MAGICSECURE))
                return;
 
-       /* Return the programmed SecureOn password */
-       reg = umac_readl(priv, UMAC_PSW_MS);
-       put_unaligned_be16(reg, &wol->sopass[0]);
-       reg = umac_readl(priv, UMAC_PSW_LS);
-       put_unaligned_be32(reg, &wol->sopass[2]);
+       memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
 }
 
 static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
        if (wol->wolopts & ~supported)
                return -EINVAL;
 
-       /* Program the SecureOn password */
-       if (wol->wolopts & WAKE_MAGICSECURE) {
-               umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
-                           UMAC_PSW_MS);
-               umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
-                           UMAC_PSW_LS);
-       }
+       if (wol->wolopts & WAKE_MAGICSECURE)
+               memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
 
        /* Flag the device and relevant IRQ as wakeup capable */
        if (wol->wolopts) {
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        unsigned int index, i = 0;
        u32 reg;
 
-       /* Password has already been programmed */
        reg = umac_readl(priv, UMAC_MPD_CTRL);
        if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
                reg |= MPD_EN;
        reg &= ~PSW_EN;
-       if (priv->wolopts & WAKE_MAGICSECURE)
+       if (priv->wolopts & WAKE_MAGICSECURE) {
+               /* Program the SecureOn password */
+               umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+                           UMAC_PSW_MS);
+               umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+                           UMAC_PSW_LS);
                reg |= PSW_EN;
+       }
        umac_writel(priv, reg, UMAC_MPD_CTRL);
 
        if (priv->wolopts & WAKE_FILTER) {
index 0887e63566499b4bee2ac1041315cc6c790c3019..0b192fea9c5d113542b9b2124d214b89844539aa 100644 (file)
@@ -12,6 +12,7 @@
 #define __BCM_SYSPORT_H
 
 #include <linux/bitmap.h>
+#include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/net_dim.h>
 
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
        unsigned int            crc_fwd:1;
        u16                     rev;
        u32                     wolopts;
+       u8                      sopass[SOPASS_MAX];
        unsigned int            wol_irq_disabled:1;
 
        /* MIB related fields */
index 6a512871176bd4ad807b028632f171913daaacc3..8bc7e495b027083942e6963c56bed62a25fd6f5d 100644 (file)
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
                u32 map_idx = ring->map_idx;
+               unsigned int vector;
 
+               vector = bp->irq_tbl[map_idx].vector;
+               disable_irq_nosync(vector);
                rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
-               if (rc)
+               if (rc) {
+                       enable_irq(vector);
                        goto err_out;
+               }
                bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
                bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+               enable_irq(vector);
                bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
 
                if (!i) {
index 5db9f4158e62a24469220170de442d5b5ab15052..134ae2862efab5d91ecfd04ebea9c0d373de2bf4 100644 (file)
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
                 * for transmits, we just free buffers.
                 */
 
-               dev_kfree_skb_irq(sb);
+               dev_consume_skb_irq(sb);
 
                /*
                 * .. and advance to the next buffer.
index 5f03199a3acf28940d78e515820d7af51e3cee4b..05f4a3b21e29ac79c367a4c770cef051b8d422fc 100644 (file)
@@ -54,7 +54,6 @@ config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
        depends on 64BIT && PCI
        imply PTP_1588_CLOCK
-       default y
        ---help---
          This driver adds support for the Precision Time Protocol Clocks and
          Timestamping coprocessor (PTP) found on Cavium processors.
index 60641e202534109f3d1341607b6c4d413b75f65b..9a7f70db20c7f67da8608f65b5e9c98efbce2412 100644 (file)
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                 * csum is correct or is zero.
                 */
                if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
-                   tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
+                   tcp_udp_csum_ok && outer_csum_ok &&
+                   (ipv4_csum_ok || ipv6)) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->csum_level = encap;
                }
index 13430f75496cc3e2b981b02adb3b32275578c3ce..f1a2da15dd0a6f00718eb68fb1eb82214c2c484d 100644 (file)
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
                                netif_dbg(de, tx_done, de->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
-                       dev_kfree_skb_irq(skb);
+                       dev_consume_skb_irq(skb);
                }
 
 next:
index b90bab72efdb3b3d4370bc5616ff0e8609b85cfd..c1968b3ecec87cf8fcd807c4f34d0be4cc6e27e8 100644 (file)
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
                dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
                                 DMA_TO_DEVICE);
 
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
        }
        spin_unlock(&priv->lock);
 
index c3d539e209ed26a4ee3f022c45be85394a1ef398..eb3e65e8868f9f65047ab24e8094f56e307bb64b 100644 (file)
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
        u16 i, j;
        u8 __iomem *bd;
 
+       netdev_reset_queue(ugeth->ndev);
+
        ug_info = ugeth->ug_info;
        uf_info = &ug_info->uf_info;
 
index 04fd1f135011f1a068bbc79991af0d05a7c56583..654ac534b10e3e8e04911b618d5fd1cb6fb5c8c8 100644 (file)
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        memset(p, 0, regs->len);
        memcpy_fromio(p, io, B3_RAM_ADDR);
 
-       memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
-                     regs->len - B3_RI_WTO_R1);
+       if (regs->len > B3_RI_WTO_R1) {
+               memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+                             regs->len - B3_RI_WTO_R1);
+       }
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
index 046948ead152a4874458aed4b1909be78652842a..f3c7ab6faea505202d48d1b0bd862db010e87e99 100644 (file)
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
+       e->route_dev = route_dev;
 
        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. So if we'll get a notification, in case the
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
+       e->route_dev = route_dev;
 
        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. So if we'll get a notification, in case the
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
-                      void *headers_v)
+                      void *headers_v, u8 *match_level)
 {
        int tunnel_type;
        int err = 0;
 
        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+               *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+               *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
index 706ce7bf15e7f19f88ac4fe6afe7375ff5445c13..b63f15de899d08b8af045f61407ba290a3440255 100644 (file)
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
-                      void *headers_v);
+                      void *headers_v, u8 *match_level);
 
 #endif //__MLX5_EN_TC_TUNNEL_H__
index f2573c2d2b5c246b466e346d69a173689e9f6318..ef9e472daffb0c6be75ae0eb9473d56f0dd34bd5 100644 (file)
@@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
+               /* Update the encap source MAC, in case we delete
+                * the flows when the encap source MAC changes.
+                */
+               ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
 
                mlx5e_tc_encap_flows_add(priv, e);
        }
index edd722824697f42c45a7bf57c9c6c510d202bfea..36eafc877e6bf576e8dbb831bcfd27dd8f73d0aa 100644 (file)
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry {
        unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
 
        struct net_device *out_dev;
+       struct net_device *route_dev;
        int tunnel_type;
        int tunnel_hlen;
        int reformat_type;
index cae6c6d489847629a45dce371cec4bc252c28372..b5c1b039375ae28c82540fa519051853addde7a1 100644 (file)
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr {
        struct net_device *filter_dev;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
+       int max_mod_hdr_actions;
        void *mod_hdr_actions;
        int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
 };
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f,
-                            struct net_device *filter_dev)
+                            struct net_device *filter_dev, u8 *match_level)
 {
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
        int err = 0;
 
        err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
-                                headers_c, headers_v);
+                                headers_c, headers_v, match_level);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "failed to parse tunnel attributes");
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              struct net_device *filter_dev,
-                             u8 *match_level)
+                             u8 *match_level, u8 *tunnel_match_level)
 {
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-                       if (parse_tunnel_attr(priv, spec, f, filter_dev))
+                       if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
                                return -EOPNOTSUPP;
                        break;
                default:
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
        struct mlx5_eswitch_rep *rep;
-       u8 match_level;
        int err;
 
-       err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
+       err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
 
        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
                rep = rpriv->rep;
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                }
        }
 
-       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                flow->esw_attr->match_level = match_level;
-       else
+               flow->esw_attr->tunnel_match_level = tunnel_match_level;
+       } else {
                flow->nic_attr->match_level = match_level;
+       }
 
        return err;
 }
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = {
        OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
 };
 
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, it says how many HW actions were
- * actually parsed.
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
+ * says how many HW actions were actually parsed.
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
        add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
 
        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
-       action = parse_attr->mod_hdr_actions;
-       max_actions = parse_attr->num_mod_hdr_actions;
-       nactions = 0;
+       action = parse_attr->mod_hdr_actions +
+                parse_attr->num_mod_hdr_actions * action_size;
+
+       max_actions = parse_attr->max_mod_hdr_actions;
+       nactions = parse_attr->num_mod_hdr_actions;
 
        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
        if (!parse_attr->mod_hdr_actions)
                return -ENOMEM;
 
-       parse_attr->num_mod_hdr_actions = max_actions;
+       parse_attr->max_mod_hdr_actions = max_actions;
        return 0;
 }
 
@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                        goto out_err;
        }
 
-       err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
-       if (err)
-               goto out_err;
+       if (!parse_attr->mod_hdr_actions) {
+               err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+               if (err)
+                       goto out_err;
+       }
 
        err = offload_pedit_fields(masks, vals, parse_attr, extack);
        if (err < 0)
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
 
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct tcf_exts *exts,
+                                         u32 actions,
                                          struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        u16 ethertype;
        int nkeys, i;
 
-       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+       if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+       else
+               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
        ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
 
        /* for non-IP we only re-write MACs, so we're okay */
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                return modify_header_match_supported(&parse_attr->spec, exts,
-                                                    extack);
+                                                    actions, extack);
 
        return true;
 }
index 598ad7e4d5c97872c17fe4ae8387e82a2555c96e..0e55cd1f2e984a4b57b12046a16f1db48efd7824 100644 (file)
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+#ifdef CONFIG_MLX5_EN_IPSEC
+               struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
+#endif
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+#ifdef CONFIG_MLX5_EN_IPSEC
+               wqe->eth = cur_eth;
+#endif
        }
 
        /* fill wqe */
index 9c89eea9b2c331752922fdf4b0b382b2f50c99e5..748ff178a1d66be5c55b3c1e3756d555eb3268c6 100644 (file)
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr {
        } dests[MLX5_MAX_FLOW_FWD_VPORTS];
        u32     mod_hdr_id;
        u8      match_level;
+       u8      tunnel_match_level;
        struct mlx5_fc *counter;
        u32     chain;
        u16     prio;
index 53065b6ae59376089764fb5cb72f61de7f395c7d..d4e6fe5b9300c69d9ca219e79b1df3dbee1cd891 100644 (file)
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);
 
-       if (attr->match_level == MLX5_MATCH_NONE)
-               spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-       else
-               spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-                                             MLX5_MATCH_MISC_PARAMETERS;
-
-       if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
-               spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+       if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+               if (attr->tunnel_match_level != MLX5_MATCH_NONE)
+                       spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+               if (attr->match_level != MLX5_MATCH_NONE)
+                       spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+       } else if (attr->match_level != MLX5_MATCH_NONE) {
+               spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+       }
 
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_id = attr->mod_hdr_id;
index 24a90163775e169eea28140aec8f6d3f4585758d..2d8a77cc156baf4f882505cd483a8bad48caecfa 100644 (file)
@@ -53,7 +53,7 @@
 extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_MAJOR_VERSION              8
-#define QED_MINOR_VERSION              33
+#define QED_MINOR_VERSION              37
 #define QED_REVISION_VERSION           0
 #define QED_ENGINEERING_VERSION                20
 
index e68ca83ae9153786322f28c1f6fb612706e24b36..58be1c4c66684797b79e4bf5d1ca834fe2d61f9d 100644 (file)
@@ -2216,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                        u16 num_queues = 0;
 
                        /* Since the feature controls only queue-zones,
-                        * make sure we have the contexts [rx, tx, xdp] to
+                        * make sure we have the contexts [rx, xdp, tcs] to
                         * match.
                         */
                        for_each_hwfn(cdev, i) {
@@ -2226,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                                u16 cids;
 
                                cids = hwfn->pf_params.eth_pf_params.num_cons;
-                               num_queues += min_t(u16, l2_queues, cids / 3);
+                               cids /= (2 + info->num_tc);
+                               num_queues += min_t(u16, l2_queues, cids);
                        }
 
                        /* queues might theoretically be >256, but interrupts'
@@ -2870,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
        p_hwfn = p_cid->p_owner;
        rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
        if (rc)
-               DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+               DP_VERBOSE(cdev, QED_MSG_DEBUG,
+                          "Unable to read queue coalescing\n");
 
        return rc;
 }
index 4179c9013fc65fea92c132c927cdaf5bc5637fe4..96ab77ae6af59f4e0268393cd85c3494039b273e 100644 (file)
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
  * @param p_hwfn
  */
 void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
 
 /**
  * @file
index 888274fa208bc768b2ab9db2514407573bfab2e1..5a495fda9e9dd4f34ef7ec4b6dbf4dc0ebe6c6db 100644 (file)
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
 
        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+       if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+               p_ent->ramrod.pf_update.mf_vlan |=
+                       cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
index eb88bbc6b1931adfa1c5d5760739fa0702e90dee..ba64ff9bedbd2078ce543abdf045056a5f5e4ce3 100644 (file)
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 
+       /* Attempt to post pending requests */
+       spin_lock_bh(&p_hwfn->p_spq->lock);
+       rc = qed_spq_pend_post(p_hwfn);
+       spin_unlock_bh(&p_hwfn->p_spq->lock);
+
        return rc;
 }
 
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 {
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
-       int                     rc;
 
        if (!p_hwfn)
                return -EINVAL;
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                 */
                qed_spq_return_entry(p_hwfn, found);
 
-       /* Attempt to post pending requests */
-       spin_lock_bh(&p_spq->lock);
-       rc = qed_spq_pend_post(p_hwfn);
-       spin_unlock_bh(&p_spq->lock);
-
-       return rc;
+       return 0;
 }
 
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
index 613249d1e967d5310df16de8576f3a4d9c317521..730997b1374733575c95e8f2168393398f17ee72 100644 (file)
@@ -56,7 +56,7 @@
 #include <net/tc_act/tc_gact.h>
 
 #define QEDE_MAJOR_VERSION             8
-#define QEDE_MINOR_VERSION             33
+#define QEDE_MINOR_VERSION             37
 #define QEDE_REVISION_VERSION          0
 #define QEDE_ENGINEERING_VERSION       20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -494,6 +494,9 @@ struct qede_reload_args {
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+                     struct net_device *sb_dev,
+                     select_queue_fallback_t fallback);
 netdev_features_t qede_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features);
index bdf816fe5a16cf8c5d1cdf4b72812e49e3a6d688..31b046e24565f38dbdb117ec6cd0f69430419c45 100644 (file)
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+                     struct net_device *sb_dev,
+                     select_queue_fallback_t fallback)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       int total_txq;
+
+       total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+       return QEDE_TSS_COUNT(edev) ?
+               fallback(dev, skb, NULL) % total_txq :  0;
+}
+
 /* 8B udp header + 8B base tunnel header + 32B option length */
 #define QEDE_MAX_TUN_HDR_LEN 48
 
index 5a74fcbdbc2b59fde83468f2d6538296fa35d2c6..9790f26d17c4a9eb00bba41afec3d2ab994da9bf 100644 (file)
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
+       .ndo_select_queue = qede_select_queue,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
+       .ndo_select_queue = qede_select_queue,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
+       .ndo_select_queue = qede_select_queue,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
index 15c62c160953308b3f4018e8bd7973dee9fa59cd..be47d864f8b9b045ef5b0f7a9e68d097bae15c1c 100644 (file)
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
                skb = ep->tx_skbuff[entry];
                pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
                                 skb->len, PCI_DMA_TODEVICE);
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
                ep->tx_skbuff[entry] = NULL;
        }
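Several hunks in this series make the same dev_kfree_skb_irq() to dev_consume_skb_irq() substitution on TX-completion paths: consume marks a successfully transmitted skb so drop-monitor tooling does not report it, while kfree stays for genuine drops. A short sketch of the convention (kernel context assumed, not a standalone program):

	#include <linux/netdevice.h>

	/* Sketch only: pick the free variant by outcome, as the hunks above do. */
	static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
	{
		if (tx_ok)
			dev_consume_skb_irq(skb);	/* delivered; not a drop */
		else
			dev_kfree_skb_irq(skb);		/* dropped; visible to drop monitor */
	}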
 
index d1f61c25d82bc8e25c1026bd20105614a73a1b05..5d85742a2be0b62189ce83dbc0de40714f6eb34c 100644 (file)
@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 {
        unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-       if (!clk)
-               return 0;
+       if (!clk) {
+               clk = priv->plat->clk_ref_rate;
+               if (!clk)
+                       return 0;
+       }
 
        return (usec * (clk / 1000000)) / 256;
 }
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
 {
        unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-       if (!clk)
-               return 0;
+       if (!clk) {
+               clk = priv->plat->clk_ref_rate;
+               if (!clk)
+                       return 0;
+       }
 
        return (riwt * 256) / (clk / 1000000);
 }
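Worked example of the conversion above (standalone C, made-up rates): when clk_get_rate() yields 0, the code now falls back to clk_ref_rate instead of silently returning 0.

	#include <stdio.h>

	static unsigned int usec2riwt(unsigned int usec, unsigned long clk_hz,
				      unsigned long clk_ref_hz)
	{
		unsigned long clk = clk_hz ? clk_hz : clk_ref_hz; /* fallback path */

		if (!clk)
			return 0;
		return (usec * (clk / 1000000)) / 256;
	}

	int main(void)
	{
		/* clk_get_rate() "failed" (0); reference clock assumed 250 MHz */
		printf("riwt for 100us = %u\n", usec2riwt(100, 0, 250000000UL)); /* 97 */
		return 0;
	}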
index 5afba69981cfcf87b98bf5a98f3a85305e5cfd99..685d20472358ec88f04f8a333913465fbdde5cb7 100644 (file)
@@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_q = &priv->tx_queue[queue];
 
+       if (priv->tx_path_in_lpi_mode)
+               stmmac_disable_eee_mode(priv);
+
        /* Manage oversized TCP frames for GMAC4 device */
        if (skb_is_gso(skb) && priv->tso) {
-               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+               if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+                       /*
+                        * There is no way to determine the number of TSO
+                        * capable queues. Always use queue 0, because if
+                        * TSO is supported then at least this one will be
+                        * capable.
+                        */
+                       skb_set_queue_mapping(skb, 0);
+
                        return stmmac_tso_xmit(skb, dev);
+               }
        }
 
        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       if (priv->tx_path_in_lpi_mode)
-               stmmac_disable_eee_mode(priv);
-
        entry = tx_q->cur_tx;
        first_entry = entry;
        WARN_ON(tx_q->tx_skbuff[first_entry]);
index 7ec4eb74fe2160b53af40e08e4bc95feda125100..6fc05c106afc6fcef6e8f8855cdcbc588c33fea1 100644 (file)
@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
                cp->net_stats[ring].tx_packets++;
                cp->net_stats[ring].tx_bytes += skb->len;
                spin_unlock(&cp->stat_lock[ring]);
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
        }
        cp->tx_old[ring] = entry;
 
index 720b7ac77f3b3c08e428faaf16ba4cbae1b76275..e9b757b03b564377079d22af1b60580584811744 100644 (file)
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)
 
                DTX(("skb(%p) ", skb));
                bp->tx_skbs[elem] = NULL;
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
 
                elem = NEXT_TX(elem);
        }
index ff641cf30a4e1306baa5573bf5e7f464eaa5e729..d007dfeba5c356d7da3b9c211840d791dd34059d 100644 (file)
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
                        this = &txbase[elem];
                }
 
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
                dev->stats.tx_packets++;
        }
        hp->tx_old = elem;
index dc966ddb6d815038b487f2edd05279e9397c30e7..b24c11187017d88b75ca9686603d5d32d7039e97 100644 (file)
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
                tx_level -= db->rptr->len;      /* '-' koz len is negative */
 
                /* now should come skb pointer - free it */
-               dev_kfree_skb_irq(db->rptr->addr.skb);
+               dev_consume_skb_irq(db->rptr->addr.skb);
                bdx_tx_db_inc_rptr(db);
        }
 
index 82412691ee66bf13b488db6d61072f239d27d9c3..27f6cf140845fb357a2880757e343c1ab024d14d 100644 (file)
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
                dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
                                 le16_to_cpu(pktlen), DMA_TO_DEVICE);
        }
-       dev_kfree_skb_irq(skb);
+       dev_consume_skb_irq(skb);
        tdinfo->skb = NULL;
 }
 
index 38ac8ef41f5fca9e1b9dcf10c63cb5ea65d6eb47..56b7791911bfc3b32d87922ca45632202e7963a4 100644 (file)
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
                                 bp->descr_block_virt->xmt_data[comp].long_1,
                                 p_xmt_drv_descr->p_skb->len,
                                 DMA_TO_DEVICE);
-               dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
+               dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
 
                /*
                 * Move to start of next packet by updating completion index
index 58bbba8582b0960de94296ccbbed9c074b12aed0..3377ac66a34748bf03c5875410f808562794ed18 100644 (file)
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev,
        }
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6: {
-               struct rt6_info *rt = rt6_lookup(geneve->net,
-                                                &info->key.u.ipv6.dst, NULL, 0,
-                                                NULL, 0);
+               struct rt6_info *rt;
+
+               if (!__in6_dev_get(dev))
+                       break;
+
+               rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
+                               NULL, 0);
 
                if (rt && rt->dst.dev)
                        ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
index 44de81e5f140fba3f6efd3c02932c2c8346f67b8..c589f5ae75bb552f53b39eed367594bc3d420165 100644 (file)
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
                }
                break;
        case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
-                       /* rx is starting */
-                       dev_dbg(printdev(lp), "RX is starting\n");
-                       mcr20a_handle_rx(lp);
+               /* rx is starting */
+               dev_dbg(printdev(lp), "RX is starting\n");
+               mcr20a_handle_rx(lp);
                break;
        case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
index 19bdde60680c29a1e58a3192a5367e0a18eba6c4..7cdac77d0c68527630b89c2eaff874582200d93a 100644 (file)
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
                        if (!err) {
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
-                               mdev->priv_flags |= IFF_L3MDEV_MASTER;
+                               mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
                        } else
                                goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
-                       mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+                       mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
        struct sk_buff *skb;
 
        if (port->mode == IPVLAN_MODE_L3S) {
-               dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+               dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
                ipvlan_unregister_nf_hook(dev_net(dev));
                dev->l3mdev_ops = NULL;
        }
index 18b41bc345ab90856aa0ad17912fb7b8a3421f46..6e8807212aa342228ad5edb63b5bee4cc6cadd81 100644 (file)
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640,
                        struct phy_txts *phy_txts)
 {
        struct skb_shared_hwtstamps shhwtstamps;
+       struct dp83640_skb_info *skb_info;
        struct sk_buff *skb;
-       u64 ns;
        u8 overflow;
+       u64 ns;
 
        /* We must already have the skb that triggered this. */
-
+again:
        skb = skb_dequeue(&dp83640->tx_queue);
-
        if (!skb) {
                pr_debug("have timestamp but tx_queue empty\n");
                return;
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640,
                }
                return;
        }
+       skb_info = (struct dp83640_skb_info *)skb->cb;
+       if (time_after(jiffies, skb_info->tmo)) {
+               kfree_skb(skb);
+               goto again;
+       }
 
        ns = phy2txts(phy_txts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 static void dp83640_txtstamp(struct phy_device *phydev,
                             struct sk_buff *skb, int type)
 {
+       struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
        struct dp83640_private *dp83640 = phydev->priv;
 
        switch (dp83640->hwts_tx_en) {
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
                /* fall through */
        case HWTSTAMP_TX_ON:
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
                skb_queue_tail(&dp83640->tx_queue, skb);
                break;
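The change above stamps a deadline into skb->cb at enqueue time and lets the completion path discard stale entries, looping to the next queued skb. A standalone analogy of that drain loop, using plain time() rather than jiffies; names and values are invented:

	#include <stdio.h>
	#include <time.h>

	struct entry { time_t deadline; int id; };

	static const struct entry *find_live(const struct entry *q, int n, time_t now)
	{
		for (int i = 0; i < n; i++) {
			if (now > q[i].deadline)
				continue;	/* expired: skip, like kfree_skb + goto again */
			return &q[i];
		}
		return NULL;			/* queue drained without a match */
	}

	int main(void)
	{
		time_t now = time(NULL);
		struct entry q[] = { { now - 5, 1 }, { now + 5, 2 } };
		const struct entry *e = find_live(q, 2, now);

		printf("live entry: %d\n", e ? e->id : -1);	/* prints 2 */
		return 0;
	}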
 
index 2e12f982534f7f248d3c5be2968b1238eae45137..abb7876a87762d6631d88894f1423c00c313d0a9 100644 (file)
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
        /* SGMII-to-Copper mode initialization */
        if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
-
                /* Select page 18 */
                err = marvell_set_page(phydev, 18);
                if (err < 0)
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
                err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
                if (err < 0)
                        return err;
-
-               /* There appears to be a bug in the 88e1512 when used in
-                * SGMII to copper mode, where the AN advertisement register
-                * clears the pause bits each time a negotiation occurs.
-                * This means we can never be truely sure what was advertised,
-                * so disable Pause support.
-                */
-               linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-                                  phydev->supported);
-               linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-                                  phydev->supported);
-               linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-                                  phydev->advertising);
-               linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-                                  phydev->advertising);
        }
 
        return m88e1318_config_init(phydev);
index 18656c4094b35a38f761f5f400eea7ecef0626b9..fed298c0cb393e126243c84b44006b94192a1c5e 100644 (file)
@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
        if (rtnl_dereference(tun->xdp_prog))
                sock_set_flag(&tfile->sk, SOCK_XDP);
 
-       tun_set_real_num_queues(tun);
-
        /* device is allowed to go away first, so no need to hold extra
         * refcnt.
         */
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
        rcu_assign_pointer(tfile->tun, tun);
        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
        tun->numqueues++;
+       tun_set_real_num_queues(tun);
 out:
        return err;
 }
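The reordering above advertises the new queue count only after the queue itself has been published in tun->tfiles. A simplified standalone sketch of that publish-then-advertise ordering (no RCU, invented structure):

	#include <stdio.h>

	#define MAX_Q 8

	struct fake_dev { void *tfiles[MAX_Q]; int numqueues; int real_num_tx_queues; };

	static void attach(struct fake_dev *d, void *tfile)
	{
		d->tfiles[d->numqueues] = tfile;	/* publish the queue first */
		d->numqueues++;
		d->real_num_tx_queues = d->numqueues;	/* only then advertise it */
	}

	int main(void)
	{
		struct fake_dev d = { .numqueues = 0 };
		int q0;

		attach(&d, &q0);
		printf("queues advertised: %d\n", d.real_num_tx_queues); /* prints 1 */
		return 0;
	}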
index 8fadd8eaf601c326afb04f28d4f53c5edb61ac54..4cfceb789eea803c07b8971673451cec9a535636 100644 (file)
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
 #define VIRTIO_XDP_TX          BIT(0)
 #define VIRTIO_XDP_REDIR       BIT(1)
 
+#define VIRTIO_XDP_FLAG        BIT(0)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
        char padding[4];
 };
 
+static bool is_xdp_frame(void *ptr)
+{
+       return (unsigned long)ptr & VIRTIO_XDP_FLAG;
+}
+
+static void *xdp_to_ptr(struct xdp_frame *ptr)
+{
+       return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
+}
+
+static struct xdp_frame *ptr_to_xdp(void *ptr)
+{
+       return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 
        sg_init_one(sq->sg, xdpf->data, xdpf->len);
 
-       err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
+       err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
+                                  GFP_ATOMIC);
        if (unlikely(err))
                return -ENOSPC; /* Caller handle free/refcnt */
 
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct receive_queue *rq = vi->rq;
-       struct xdp_frame *xdpf_sent;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
        unsigned int len;
+       int packets = 0;
+       int bytes = 0;
        int drops = 0;
        int kicks = 0;
        int ret, err;
+       void *ptr;
        int i;
 
-       sq = virtnet_xdp_sq(vi);
-
-       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
-               ret = -EINVAL;
-               drops = n;
-               goto out;
-       }
-
        /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
         * indicate XDP resources have been successfully allocated.
         */
        xdp_prog = rcu_dereference(rq->xdp_prog);
-       if (!xdp_prog) {
-               ret = -ENXIO;
+       if (!xdp_prog)
+               return -ENXIO;
+
+       sq = virtnet_xdp_sq(vi);
+
+       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+               ret = -EINVAL;
                drops = n;
                goto out;
        }
 
        /* Free up any pending old buffers before queueing new ones. */
-       while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
-               xdp_return_frame(xdpf_sent);
+       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               if (likely(is_xdp_frame(ptr))) {
+                       struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+                       bytes += frame->len;
+                       xdp_return_frame(frame);
+               } else {
+                       struct sk_buff *skb = ptr;
+
+                       bytes += skb->len;
+                       napi_consume_skb(skb, false);
+               }
+               packets++;
+       }
 
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
 out:
        u64_stats_update_begin(&sq->stats.syncp);
+       sq->stats.bytes += bytes;
+       sq->stats.packets += packets;
        sq->stats.xdp_tx += n;
        sq->stats.xdp_tx_drops += drops;
        sq->stats.kicks += kicks;
@@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
-       struct sk_buff *skb;
        unsigned int len;
        unsigned int packets = 0;
        unsigned int bytes = 0;
+       void *ptr;
 
-       while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               pr_debug("Sent skb %p\n", skb);
+       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               if (likely(!is_xdp_frame(ptr))) {
+                       struct sk_buff *skb = ptr;
 
-               bytes += skb->len;
-               packets++;
+                       pr_debug("Sent skb %p\n", skb);
+
+                       bytes += skb->len;
+                       napi_consume_skb(skb, in_napi);
+               } else {
+                       struct xdp_frame *frame = ptr_to_xdp(ptr);
 
-               napi_consume_skb(skb, in_napi);
+                       bytes += frame->len;
+                       xdp_return_frame(frame);
+               }
+               packets++;
        }
 
        /* Avoid overhead when no packets have been processed
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
        u64_stats_update_end(&sq->stats.syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+               return false;
+       else if (q < vi->curr_queue_pairs)
+               return true;
+       else
+               return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
        struct send_queue *sq = &vi->sq[index];
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-       if (!sq->napi.weight)
+       if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
                return;
 
        if (__netif_tx_trylock(txq)) {
@@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 {
        struct send_queue *sq = container_of(napi, struct send_queue, napi);
        struct virtnet_info *vi = sq->vq->vdev->priv;
-       struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+       unsigned int index = vq2txq(sq->vq);
+       struct netdev_queue *txq;
 
+       if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+               /* We don't need to enable cb for XDP */
+               napi_complete_done(napi, 0);
+               return 0;
+       }
+
+       txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
        free_old_xmit_skbs(sq, true);
        __netif_tx_unlock(txq);
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                return -ENOMEM;
        }
 
+       old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
+       if (!prog && !old_prog)
+               return 0;
+
        if (prog) {
                prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
                if (IS_ERR(prog))
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
        }
 
        /* Make sure NAPI is not using any XDP TX queues for RX. */
-       if (netif_running(dev))
-               for (i = 0; i < vi->max_queue_pairs; i++)
+       if (netif_running(dev)) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
                        napi_disable(&vi->rq[i].napi);
+                       virtnet_napi_tx_disable(&vi->sq[i].napi);
+               }
+       }
+
+       if (!prog) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+                       if (i == 0)
+                               virtnet_restore_guest_offloads(vi);
+               }
+               synchronize_net();
+       }
 
-       netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
        if (err)
                goto err;
+       netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        vi->xdp_queue_pairs = xdp_qp;
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
-               old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
-               rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
-               if (i == 0) {
-                       if (!old_prog)
+       if (prog) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
+                       if (i == 0 && !old_prog)
                                virtnet_clear_guest_offloads(vi);
-                       if (!prog)
-                               virtnet_restore_guest_offloads(vi);
                }
+       }
+
+       for (i = 0; i < vi->max_queue_pairs; i++) {
                if (old_prog)
                        bpf_prog_put(old_prog);
-               if (netif_running(dev))
+               if (netif_running(dev)) {
                        virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+                                              &vi->sq[i].napi);
+               }
        }
 
        return 0;
 
 err:
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+       if (!prog) {
+               virtnet_clear_guest_offloads(vi);
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
+       }
+
+       if (netif_running(dev)) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+                                              &vi->sq[i].napi);
+               }
+       }
        if (prog)
                bpf_prog_sub(prog, vi->max_queue_pairs - 1);
        return err;
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                        put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-               return false;
-       else if (q < vi->curr_queue_pairs)
-               return true;
-       else
-               return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
        void *buf;
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->sq[i].vq;
                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-                       if (!is_xdp_raw_buffer_queue(vi, i))
+                       if (!is_xdp_frame(buf))
                                dev_kfree_skb(buf);
                        else
-                               put_page(virt_to_head_page(buf));
+                               xdp_return_frame(ptr_to_xdp(buf));
                }
        }
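Standalone demo of the low-bit tagging introduced above: because both sk_buff and xdp_frame pointers are at least 2-byte aligned, bit 0 is free to record which type was queued on the TX virtqueue, letting the free path tell them apart.

	#include <stdint.h>
	#include <stdio.h>

	#define XDP_FLAG 0x1UL

	static int is_xdp(void *ptr)      { return (uintptr_t)ptr & XDP_FLAG; }
	static void *tag_xdp(void *frame) { return (void *)((uintptr_t)frame | XDP_FLAG); }
	static void *untag(void *ptr)     { return (void *)((uintptr_t)ptr & ~XDP_FLAG); }

	int main(void)
	{
		long frame_storage = 42;	/* stand-in for an xdp_frame */
		void *queued = tag_xdp(&frame_storage);

		if (is_xdp(queued))
			printf("xdp frame holds %ld\n", *(long *)untag(queued)); /* 42 */
		return 0;
	}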
 
index c0b0f525c87ca81c52ba66e0563288c558a7605c..27decf8ae840b95e3b8568a7d4f94cb8805e9093 100644 (file)
@@ -1575,7 +1575,7 @@ try:
                                        dev->stats.tx_packets++;
                                        dev->stats.tx_bytes += skb->len;
                                }
-                               dev_kfree_skb_irq(skb);
+                               dev_consume_skb_irq(skb);
                                dpriv->tx_skbuff[cur] = NULL;
                                ++dpriv->tx_dirty;
                        } else {
index 66d889d54e58ce7bd8f760a0b877c24320a70352..a08f04c3f64410c9ca8d78ff62802ac1c417be40 100644 (file)
@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
                memset(priv->tx_buffer +
                       (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
                       0, skb->len);
-               dev_kfree_skb_irq(skb);
+               dev_consume_skb_irq(skb);
 
                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
index 399b501f3c3c19d73f6183ad3a5ad692fefc86ff..e8891f5fc83ada175eebe0bd389ea0e7494ba5b9 100644 (file)
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
                .id = WCN3990_HW_1_0_DEV_VERSION,
                .dev_id = 0,
-               .bus = ATH10K_BUS_PCI,
+               .bus = ATH10K_BUS_SNOC,
                .name = "wcn3990 hw1.0",
                .continuous_frag_desc = true,
                .tx_chain_mask = 0x7,
index 491ca3c8b43cb745c7ea0beed63e6097fbc96a64..83d5bceea08f75c0f37b65f873600ba1c7eb0b79 100644 (file)
@@ -1,6 +1,6 @@
 config IWLWIFI
        tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
-       depends on PCI && HAS_IOMEM
+       depends on PCI && HAS_IOMEM && CFG80211
        select FW_LOADER
        ---help---
          Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
 config IWLWIFI_LEDS
        bool
        depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+       depends on IWLMVM || IWLDVM
        select LEDS_TRIGGERS
        select MAC80211_LEDS
        default y
index 497e762978cca5fa6b48cb78939d16dc740a0f26..b2cabce1d74d78e97666200194724ea8c6852981 100644 (file)
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
        mt76x02_add_rate_power_offset(t, delta);
 }
 
-void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
+void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
 {
        struct mt76x0_chan_map {
                u8 chan;
                u8 offset;
        } chan_map[] = {
-               {   2,  0 }, {   4,  1 }, {   6,  2 }, {   8,  3 },
-               {  10,  4 }, {  12,  5 }, {  14,  6 }, {  38,  0 },
-               {  44,  1 }, {  48,  2 }, {  54,  3 }, {  60,  4 },
-               {  64,  5 }, { 102,  6 }, { 108,  7 }, { 112,  8 },
-               { 118,  9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
-               { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
-               { 167, 17 }, { 171, 18 }, { 173, 19 },
+               {   2,  0 }, {   4,  2 }, {   6,  4 }, {   8,  6 },
+               {  10,  8 }, {  12, 10 }, {  14, 12 }, {  38,  0 },
+               {  44,  2 }, {  48,  4 }, {  54,  6 }, {  60,  8 },
+               {  64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
+               { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
+               { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
+               { 167, 34 }, { 171, 36 }, { 175, 38 },
        };
        struct ieee80211_channel *chan = dev->mt76.chandef.chan;
        u8 offset, addr;
+       int i, idx = 0;
        u16 data;
-       int i;
 
        if (mt76x0_tssi_enabled(dev)) {
                s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
                else
                        data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
                target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
-               info[0] = target_power + mt76x0_get_delta(dev);
-               info[1] = 0;
+               *tp = target_power + mt76x0_get_delta(dev);
 
                return;
        }
 
        for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
-               if (chan_map[i].chan <= chan->hw_value) {
+               if (chan->hw_value <= chan_map[i].chan) {
+                       idx = (chan->hw_value == chan_map[i].chan);
                        offset = chan_map[i].offset;
                        break;
                }
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
                addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
        } else {
                switch (chan->hw_value) {
+               case 42:
+                       offset = 2;
+                       break;
                case 58:
                        offset = 8;
                        break;
                case 106:
                        offset = 14;
                        break;
-               case 112:
+               case 122:
                        offset = 20;
                        break;
                case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
        }
 
        data = mt76x02_eeprom_get(dev, addr);
-
-       info[0] = data;
-       if (!info[0] || info[0] > 0x3f)
-               info[0] = 5;
-
-       info[1] = data >> 8;
-       if (!info[1] || info[1] > 0x3f)
-               info[1] = 5;
+       *tp = data >> (8 * idx);
+       if (*tp < 0 || *tp > 0x3f)
+               *tp = 5;
 }
 
 static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
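For illustration, the per-channel lookup after the fix above: a 16-bit EEPROM word carries two adjacent channels' power bytes, and idx selects the high byte when the channel matches the map entry exactly. Standalone C with invented values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t data = 0x2314;	/* low byte: first channel, high byte: second */
		int idx = 1;		/* hw_value == chan_map[i].chan               */
		int8_t tp = (int8_t)(data >> (8 * idx));

		if (tp < 0 || tp > 0x3f)
			tp = 5;		/* same sanity clamp as the driver */
		printf("tx power byte = %d\n", tp);	/* prints 35 (0x23) */
		return 0;
	}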
index ee9ade9f3c8bf1394dce37618613e07d0400c08d..42b259f90b6d3f6f8ce8ef76bd97f061f7c584bf 100644 (file)
@@ -26,7 +26,7 @@ struct mt76x02_dev;
 int mt76x0_eeprom_init(struct mt76x02_dev *dev);
 void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
 void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
-void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
+void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);
 
 static inline s8 s6_to_s8(u32 val)
 {
index 1eb1a802ed20d2a8744fa6496c4857c43a762046..b6166703ad763742475a4cfdf80bcc1f0479cd27 100644 (file)
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
 void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
 {
        struct mt76_rate_power *t = &dev->mt76.rate_power;
-       u8 info[2];
+       s8 info;
 
        mt76x0_get_tx_power_per_rate(dev);
-       mt76x0_get_power_info(dev, info);
+       mt76x0_get_power_info(dev, &info);
 
-       mt76x02_add_rate_power_offset(t, info[0]);
+       mt76x02_add_rate_power_offset(t, info);
        mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
        dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
-       mt76x02_add_rate_power_offset(t, -info[0]);
+       mt76x02_add_rate_power_offset(t, -info);
 
-       mt76x02_phy_set_txpower(dev, info[0], info[1]);
+       mt76x02_phy_set_txpower(dev, info, info);
 }
 
 void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
index bd10165d7eec5b4d28c22d06e7a3abad79ade277..4d4b077011497b140fa56d2ac74159bdb8dcc25d 100644 (file)
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
        }
 
        sdio_claim_host(func);
+       /*
+        * To guarantee that the SDIO card is power cycled, as required for
+        * the FW programming to succeed, do a brute-force HW reset.
+        */
+       mmc_hw_reset(card->host);
+
        sdio_enable_func(func);
        sdio_release_host(func);
 
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
 {
        struct sdio_func *func = dev_to_sdio_func(glue->dev);
        struct mmc_card *card = func->card;
-       int error;
 
        sdio_claim_host(func);
        sdio_disable_func(func);
        sdio_release_host(func);
 
        /* Let runtime PM know the card is powered off */
-       error = pm_runtime_put(&card->dev);
-       if (error < 0 && error != -EBUSY) {
-               dev_err(&card->dev, "%s failed: %i\n", __func__, error);
-
-               return error;
-       }
-
+       pm_runtime_put(&card->dev);
        return 0;
 }
 
index 150e49723c15af38167f3d6aa6a3ddb9a745ed4b..6a9dd68c0f4feb673e57ebbaba1bfe967832b1ba 100644 (file)
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
         * effects say only one namespace is affected.
         */
        if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+               mutex_lock(&ctrl->scan_lock);
                nvme_start_freeze(ctrl);
                nvme_wait_freeze(ctrl);
        }
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
         */
        if (effects & NVME_CMD_EFFECTS_LBCC)
                nvme_update_formats(ctrl);
-       if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+       if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
                nvme_unfreeze(ctrl);
+               mutex_unlock(&ctrl->scan_lock);
+       }
        if (effects & NVME_CMD_EFFECTS_CCC)
                nvme_init_identify(ctrl);
        if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3401,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_identify_ctrl(ctrl, &id))
                return;
 
+       mutex_lock(&ctrl->scan_lock);
        nn = le32_to_cpu(id->nn);
        if (ctrl->vs >= NVME_VS(1, 1, 0) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3409,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work)
        }
        nvme_scan_ns_sequential(ctrl, nn);
 out_free_id:
+       mutex_unlock(&ctrl->scan_lock);
        kfree(id);
        down_write(&ctrl->namespaces_rwsem);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3652,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
        ctrl->state = NVME_CTRL_NEW;
        spin_lock_init(&ctrl->lock);
+       mutex_init(&ctrl->scan_lock);
        INIT_LIST_HEAD(&ctrl->namespaces);
        init_rwsem(&ctrl->namespaces_rwsem);
        ctrl->dev = dev;
index ab961bdeea89ad5e3c6d2f0be0480ca23083bd7c..c4a1bb41abf0041e41e18a40db2dc4280be0aab4 100644 (file)
@@ -154,6 +154,7 @@ struct nvme_ctrl {
        enum nvme_ctrl_state state;
        bool identified;
        spinlock_t lock;
+       struct mutex scan_lock;
        const struct nvme_ctrl_ops *ops;
        struct request_queue *admin_q;
        struct request_queue *connect_q;
index 9bc585415d9ba6b639bdbfd023d217a5b126084c..022ea1ee63f8d1998f8f7deaf75470e344a66a06 100644 (file)
@@ -2557,16 +2557,7 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
-       /*
-        * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
-        * initializing procedure here.
-        */
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
-               dev_warn(dev->ctrl.device,
-                       "failed to mark controller CONNECTING\n");
-               goto out;
-       }
-
+       mutex_lock(&dev->shutdown_lock);
        result = nvme_pci_enable(dev);
        if (result)
                goto out;
@@ -2585,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work)
         */
        dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
        dev->ctrl.max_segments = NVME_MAX_SEGS;
+       mutex_unlock(&dev->shutdown_lock);
+
+       /*
+        * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
+        * initializing procedure here.
+        */
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+               dev_warn(dev->ctrl.device,
+                       "failed to mark controller CONNECTING\n");
+               goto out;
+       }
 
        result = nvme_init_identify(&dev->ctrl);
        if (result)
index b0a413f3f7cabbdc0ccf5f0f9b06c137c5144cc8..e2a879e93d8680dc596aa9ff6ba946bdcd0dcf13 100644 (file)
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev)
                break;
        }
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
-                        quirk_synopsys_haps);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
+                              PCI_CLASS_SERIAL_USB_XHCI, 0,
+                              quirk_synopsys_haps);
 
 /*
  * Let's make the southbridge information explicit instead of having to
index 05044e323ea5529d15d001337fcad75f089abe23..03ec7a5d9d0bd8014382cad9f316071828bc7f63 100644 (file)
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-                       DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-                       DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-                       DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-                       DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {}
index 1817786ab6aa164384c90c96ade5bc2fc1830102..a005cbccb4f7d527d79533a5e6df16979ec21f8e 100644 (file)
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
 config PINCTRL_MT7623
        bool "Mediatek MT7623 pin control with generic binding"
        depends on MACH_MT7623 || COMPILE_TEST
+       depends on OF
        default MACH_MT7623
        select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT7629
        bool "Mediatek MT7629 pin control"
        depends on MACH_MT7629 || COMPILE_TEST
+       depends on OF
        default MACH_MT7629
        select PINCTRL_MTK_MOORE
 
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
 
 config PINCTRL_MT7622
        bool "MediaTek MT7622 pin control"
+       depends on OF
        depends on ARM64 || COMPILE_TEST
        default ARM64 && ARCH_MEDIATEK
        select PINCTRL_MTK_MOORE
index b03481ef99a13ab63dc8ba188113fcccfe50be78..98905d4a79ca93b52935dc438b4797bb4fc86f78 100644 (file)
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                break;
 
        case MCP_TYPE_S18:
+               one_regmap_config =
+                       devm_kmemdup(dev, &mcp23x17_regmap,
+                               sizeof(struct regmap_config), GFP_KERNEL);
+               if (!one_regmap_config)
+                       return -ENOMEM;
                mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
-                                              &mcp23x17_regmap);
+                                              one_regmap_config);
                mcp->reg_shift = 1;
                mcp->chip.ngpio = 16;
                mcp->chip.label = "mcp23s18";
index aa8b58125568ddcd90d64a76db5e414920e00873..ef4268cc62275057016846703cca8de58f8386f6 100644 (file)
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
 static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
        .pins = h6_pins,
        .npins = ARRAY_SIZE(h6_pins),
-       .irq_banks = 3,
+       .irq_banks = 4,
        .irq_bank_map = h6_irq_bank_map,
        .irq_read_needs_mux = true,
 };
index 5d9184d18c16058fbd0f6e7dd4854c3cc5b8e827..0e7fa69e93dfd32aa74182d90ac07a517f1a32db 100644 (file)
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
 {
        struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
        unsigned short bank = offset / PINS_PER_BANK;
-       struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
-       struct regulator *reg;
+       unsigned short bank_offset = bank - pctl->desc->pin_base /
+                                           PINS_PER_BANK;
+       struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
+       struct regulator *reg = s_reg->regulator;
+       char supply[16];
        int ret;
 
-       reg = s_reg->regulator;
-       if (!reg) {
-               char supply[16];
-
-               snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
-               reg = regulator_get(pctl->dev, supply);
-               if (IS_ERR(reg)) {
-                       dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
-                               'A' + bank);
-                       return PTR_ERR(reg);
-               }
-
-               s_reg->regulator = reg;
-               refcount_set(&s_reg->refcount, 1);
-       } else {
+       if (reg) {
                refcount_inc(&s_reg->refcount);
+               return 0;
+       }
+
+       snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
+       reg = regulator_get(pctl->dev, supply);
+       if (IS_ERR(reg)) {
+               dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
+                       'A' + bank);
+               return PTR_ERR(reg);
        }
 
        ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
                goto out;
        }
 
+       s_reg->regulator = reg;
+       refcount_set(&s_reg->refcount, 1);
+
        return 0;
 
 out:
-       if (refcount_dec_and_test(&s_reg->refcount)) {
-               regulator_put(s_reg->regulator);
-               s_reg->regulator = NULL;
-       }
+       regulator_put(s_reg->regulator);
 
        return ret;
 }
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
 {
        struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
        unsigned short bank = offset / PINS_PER_BANK;
-       struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
+       unsigned short bank_offset = bank - pctl->desc->pin_base /
+                                           PINS_PER_BANK;
+       struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
 
        if (!refcount_dec_and_test(&s_reg->refcount))
                return 0;
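Standalone illustration of the index fix above: the regulator table is per controller, so the global bank number must be rebased by pin_base before indexing. Values are hypothetical, loosely modeled on a PL-style bank:

	#include <stdio.h>

	#define PINS_PER_BANK 32

	int main(void)
	{
		unsigned int pin_base = 352;	/* assumed controller pin base */
		unsigned int offset = 355;	/* global pin number           */
		unsigned int bank = offset / PINS_PER_BANK;
		unsigned int bank_offset = bank - pin_base / PINS_PER_BANK;

		printf("bank %u -> regulators[%u]\n", bank, bank_offset); /* 11 -> 0 */
		return 0;
	}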
index e340d2a24b44e3874246d78c7fb3b4f0c413882c..034c0317c8d61694c39e8afc127e34dbe5e4c931 100644 (file)
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
        struct gpio_chip                *chip;
        const struct sunxi_pinctrl_desc *desc;
        struct device                   *dev;
-       struct sunxi_pinctrl_regulator  regulators[12];
+       struct sunxi_pinctrl_regulator  regulators[9];
        struct irq_domain               *domain;
        struct sunxi_pinctrl_function   *functions;
        unsigned                        nfunctions;
index 0ee026947f209941ace51b0ff8a476885e46b048..122059ecad848e372ead33052e08f6abac3102d7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -789,6 +790,7 @@ struct qeth_card {
        struct qeth_seqno seqno;
        struct qeth_card_options options;
 
+       struct workqueue_struct *event_wq;
        wait_queue_head_t wait_q;
        spinlock_t mclock;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
 extern const struct attribute_group qeth_device_attr_group;
 extern const struct attribute_group qeth_device_blkt_group;
 extern const struct device_type qeth_generic_devtype;
-extern struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *);
 const char *qeth_get_cardname_short(struct qeth_card *);
index e63e03143ca7fa2f6995c7a24dbef9c981c7f41c..89f912213e6261ca945837afb31f0b0abb52efc0 100644 (file)
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
-struct workqueue_struct *qeth_wq;
-EXPORT_SYMBOL_GPL(qeth_wq);
+static struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *card)
 {
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
                QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
                                 rc, CARD_DEVID(card));
                atomic_set(&channel->irq_pending, 0);
+               qeth_release_buffer(channel, iob);
                card->read_or_write_problem = 1;
                qeth_schedule_recovery(card);
                wake_up(&card->wait_q);
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                rc = qeth_get_problem(card, cdev, irb);
                if (rc) {
                        card->read_or_write_problem = 1;
+                       if (iob)
+                               qeth_release_buffer(iob->channel, iob);
                        qeth_clear_ipacmd_list(card);
                        qeth_schedule_recovery(card);
                        goto out;
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
        CARD_RDEV(card) = gdev->cdev[0];
        CARD_WDEV(card) = gdev->cdev[1];
        CARD_DDEV(card) = gdev->cdev[2];
+
+       card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+       if (!card->event_wq)
+               goto out_wq;
        if (qeth_setup_channel(&card->read, true))
                goto out_ip;
        if (qeth_setup_channel(&card->write, true))
@@ -1481,6 +1487,8 @@ out_data:
 out_channel:
        qeth_clean_channel(&card->read);
 out_ip:
+       destroy_workqueue(card->event_wq);
+out_wq:
        dev_set_drvdata(&gdev->dev, NULL);
        kfree(card);
 out:
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
                QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
                atomic_set(&channel->irq_pending, 0);
+               qeth_release_buffer(channel, iob);
                wake_up(&card->wait_q);
                return rc;
        }
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
                        rc);
                QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
                atomic_set(&channel->irq_pending, 0);
+               qeth_release_buffer(channel, iob);
                wake_up(&card->wait_q);
                return rc;
        }
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
        }
        reply = qeth_alloc_reply(card);
        if (!reply) {
+               qeth_release_buffer(channel, iob);
                return -ENOMEM;
        }
        reply->callback = reply_cb;
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
        return 0;
 }
 
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
 {
        if (!q)
                return;
 
+       qeth_clear_outq_buffers(q, 1);
        qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        kfree(q);
 }
@@ -2467,10 +2479,8 @@ out_freeoutqbufs:
                card->qdio.out_qs[i]->bufs[j] = NULL;
        }
 out_freeoutq:
-       while (i > 0) {
-               qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
-               qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-       }
+       while (i > 0)
+               qeth_free_output_queue(card->qdio.out_qs[--i]);
        kfree(card->qdio.out_qs);
        card->qdio.out_qs = NULL;
 out_freepool:
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
        qeth_free_buffer_pool(card);
        /* free outbound qdio_qs */
        if (card->qdio.out_qs) {
-               for (i = 0; i < card->qdio.no_out_queues; ++i) {
-                       qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-                       qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
-               }
+               for (i = 0; i < card->qdio.no_out_queues; i++)
+                       qeth_free_output_queue(card->qdio.out_qs[i]);
                kfree(card->qdio.out_qs);
                card->qdio.out_qs = NULL;
        }
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card)
        qeth_clean_channel(&card->read);
        qeth_clean_channel(&card->write);
        qeth_clean_channel(&card->data);
+       destroy_workqueue(card->event_wq);
        qeth_free_qdio_buffers(card);
        unregister_service_level(&card->qeth_service_level);
        dev_set_drvdata(&card->gdev->dev, NULL);
index f108d4b44605805b3430a7fc7890340645b92146..a43de2f9bcac4abc253e9421398fd9546753428e 100644 (file)
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
+
+       flush_workqueue(card->event_wq);
 }
 
 static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l2_set_offline(cgdev);
+
+       cancel_work_sync(&card->close_dev_work);
        if (qeth_netdev_is_registered(card->dev))
                unregister_netdev(card->dev);
 }
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
        data->card = card;
        memcpy(&data->qports, qports,
                        sizeof(struct qeth_sbp_state_change) + extrasize);
-       queue_work(qeth_wq, &data->worker);
+       queue_work(card->event_wq, &data->worker);
 }
 
 struct qeth_bridge_host_data {
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
        data->card = card;
        memcpy(&data->hostevs, hostevs,
                        sizeof(struct qeth_ipacmd_addr_change) + extrasize);
-       queue_work(qeth_wq, &data->worker);
+       queue_work(card->event_wq, &data->worker);
 }
 
 /* SETBRIDGEPORT support; sending commands */
index 42a7cdc59b76af1a8e220f4097a16702a950cab9..df34bff4ac31ba18c62380c6e13a312fdc1d8213 100644 (file)
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
                qeth_clear_cmd_buffers(&card->read);
                qeth_clear_cmd_buffers(&card->write);
        }
+
+       flush_workqueue(card->event_wq);
 }
 
 /*
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l3_set_offline(cgdev);
 
+       cancel_work_sync(&card->close_dev_work);
        if (qeth_netdev_is_registered(card->dev))
                unregister_netdev(card->dev);
        qeth_l3_clear_ip_htable(card, 0);
index f83f79b07b508de2b8f76dbcb481f9c9027bd0b9..07efcb9b5b943109bea84daf60fc223263b8d376 100644 (file)
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        asd_dev_rev[asd_ha->revision_id]);
 }
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
 
 static ssize_t asd_show_dev_bios_build(struct device *dev,
                                       struct device_attribute *attr,char *buf)
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
 {
        int err;
 
-       err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+       err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
        if (err)
                return err;
 
@@ -499,13 +499,13 @@ err_update_bios:
 err_biosb:
        device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 err_rev:
-       device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+       device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
        return err;
 }
 
 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
 {
-       device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+       device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
        device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
        device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
        device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
index bfa13e3b191c7aa026af76279a8acc1a708db703..c8bad2c093b8b88f6495f0af61b836f9395b3b1a 100644 (file)
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
 
        cfg = shost_priv(host);
+       cfg->state = STATE_PROBING;
        cfg->host = host;
        rc = alloc_mem(cfg);
        if (rc) {
@@ -3775,6 +3776,7 @@ out:
        return rc;
 
 out_remove:
+       cfg->state = STATE_PROBED;
        cxlflash_remove(pdev);
        goto out;
 }
index 9192a1d9dec6260f12348835a977d3e1f03f36f7..dfba4921b265a0fa7fdf2409295483a68c5e181c 100644 (file)
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
        struct fc_rport_priv *rdata;
 
        rdata = container_of(kref, struct fc_rport_priv, kref);
-       WARN_ON(!list_empty(&rdata->peers));
        kfree_rcu(rdata, rcu);
 }
 EXPORT_SYMBOL(fc_rport_destroy);
index 83365b29a4d8ed515b1381166b5b438ddb60c363..fff86940388bab1315c000b241a5456b297cd418 100644 (file)
@@ -462,12 +462,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
        sdkp->device->use_10_for_rw = 0;
 
        /*
-        * If something changed, revalidate the disk zone bitmaps once we have
-        * the capacity, that is on the second revalidate execution during disk
-        * scan and always during normal revalidate.
+        * Revalidate the disk zone bitmaps once the block device capacity is
+        * set, that is on the second revalidate execution during disk scan,
+        * and whenever something changed during a normal revalidate.
         */
-       if (sdkp->first_scan)
+       if (sdkp->first_scan) {
+               sdkp->zone_blocks = zone_blocks;
+               sdkp->nr_zones = nr_zones;
                return 0;
+       }
+
        if (sdkp->zone_blocks != zone_blocks ||
            sdkp->nr_zones != nr_zones ||
            disk->queue->nr_zones != nr_zones) {
index 52c153cd795a8c471b48eb570fa87e5613244c2e..636f83f781f5abf85546d9a754b67baab09d0831 100644 (file)
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work);
 static irqreturn_t portal_isr(int irq, void *ptr)
 {
        struct qman_portal *p = ptr;
-
-       u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
        u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+       u32 clear = 0;
 
        if (unlikely(!is))
                return IRQ_NONE;
 
        /* DQRR-handling if it's interrupt-driven */
-       if (is & QM_PIRQ_DQRI)
+       if (is & QM_PIRQ_DQRI) {
                __poll_portal_fast(p, QMAN_POLL_LIMIT);
+               clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+       }
        /* Handling of anything else that's interrupt-driven */
-       clear |= __poll_portal_slow(p, is);
+       clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
        qm_out(&p->p, QM_REG_ISR, clear);
        return IRQ_HANDLED;
 }
index 2848fa71a33d2c51f1fe7f6e615419125aac9924..d6248eecf123bdc5ad123ac1fd7b5821b9a22e24 100644 (file)
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
                return -ENODEV;
 
        priv->last_link = 0;
-       phy_start_aneg(phydev);
+       phy_start(phydev);
 
        return 0;
 no_phy:
index c92bbd05516e5561172049207429f4d6ce548d3d..005de0024dd405086e6fa79fa5c1abf6c8d6ac83 100644 (file)
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
                return;
        }
 
-       speakup_tty->ops->send_xchar(speakup_tty, ch);
+       if (speakup_tty->ops->send_xchar)
+               speakup_tty->ops->send_xchar(speakup_tty, ch);
        mutex_unlock(&speakup_tty_mutex);
 }
 
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
                return;
        }
 
-       speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+       if (speakup_tty->ops->tiocmset)
+               speakup_tty->ops->tiocmset(speakup_tty, set, clear);
        mutex_unlock(&speakup_tty_mutex);
 }
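Editor's note: both speakup hunks above guard calls through tty operations that some line disciplines leave NULL. A minimal sketch of checking an optional ops hook before calling through it (the struct and helper names are illustrative, not the speakup ones):

#include <stdio.h>

struct ops {
    void (*send_xchar)(char ch);   /* optional: a backend may leave this NULL */
};

static void send_high_priority(const struct ops *o, char ch)
{
    /* only call through the hook if the backend actually provides it */
    if (o->send_xchar)
        o->send_xchar(ch);
    else
        printf("backend has no send_xchar; dropping '%c'\n", ch);
}

static void real_send(char ch) { printf("sent '%c'\n", ch); }

int main(void)
{
    struct ops with = { .send_xchar = real_send };
    struct ops without = { 0 };

    send_high_priority(&with, 'x');
    send_high_priority(&without, 'x');
    return 0;
}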
 
index 72016d0dfca5dcc68483f6bfa1cc90adea2b2480..8e7fffbb880291e494dc8074e1fbdaa884426fea 100644 (file)
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item,
        return count;
 }
 
+/* always zero, but attr needs to remain RW to avoid userspace breakage */
+static ssize_t pi_prot_format_show(struct config_item *item, char *page)
+{
+       return snprintf(page, PAGE_SIZE, "0\n");
+}
+
 static ssize_t pi_prot_format_store(struct config_item *item,
                const char *page, size_t count)
 {
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc);
 CONFIGFS_ATTR(, emulate_pr);
 CONFIGFS_ATTR(, pi_prot_type);
 CONFIGFS_ATTR_RO(, hw_pi_prot_type);
-CONFIGFS_ATTR_WO(, pi_prot_format);
+CONFIGFS_ATTR(, pi_prot_format);
 CONFIGFS_ATTR(, pi_prot_verify);
 CONFIGFS_ATTR(, enforce_pr_isids);
 CONFIGFS_ATTR(, is_nonrot);
index e2c407656fa6c8045d33d43f9fa6d9ffe113fcb4..c1fdbc0b68401bf3b1030faf9a41321ae045013e 100644 (file)
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
        if (dmacnt == 2) {
                data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma),
                                         GFP_KERNEL);
+               if (!data->dma)
+                       return -ENOMEM;
+
                data->dma->fn = mtk8250_dma_filter;
                data->dma->rx_size = MTK_UART_RX_SIZE;
                data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
index f80a300b5d68f6e8ad61b7daf2544234da7e1662..48bd694a5fa1f825b2432ffc019975595bc5538b 100644 (file)
@@ -3420,6 +3420,11 @@ static int
 serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
 {
        int num_iomem, num_port, first_port = -1, i;
+       int rc;
+
+       rc = serial_pci_is_class_communication(dev);
+       if (rc)
+               return rc;
 
        /*
         * Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
 
        board = &pci_boards[ent->driver_data];
 
-       rc = serial_pci_is_class_communication(dev);
-       if (rc)
-               return rc;
-
        rc = serial_pci_is_blacklisted(dev);
        if (rc)
                return rc;
index 5c01bb6d1c24f7081ce537c5445566266af0c387..556f50aa1b586ce9876d61ca35d95b0a4568b088 100644 (file)
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
        struct uart_port *port;
        unsigned long flags;
 
+       if (!state)
+               return;
+
        port = uart_port_lock(state, flags);
        __uart_start(tty);
        uart_port_unlock(port, flags);
@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
        upstat_t mask = UPSTAT_SYNC_FIFO;
        struct uart_port *port;
 
+       if (!state)
+               return;
+
        port = uart_port_ref(state);
        if (!port)
                return;
index 8df0fd8245203f8b5d77aa6f5f8666830ca47b01..64bbeb7d7e0c727e85fe8999ca56c3f03f5cee9f 100644 (file)
@@ -1921,7 +1921,7 @@ out_nomem:
 
 static void sci_free_irq(struct sci_port *port)
 {
-       int i;
+       int i, j;
 
        /*
         * Intentionally in reverse order so we iterate over the muxed
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
                if (unlikely(irq < 0))
                        continue;
 
+               /* Check if already freed (irq was muxed) */
+               for (j = 0; j < i; j++)
+                       if (port->irqs[j] == irq)
+                               j = i + 1;
+               if (j > i)
+                       continue;
+
                free_irq(port->irqs[i], port);
                kfree(port->irqstr[i]);
 
index cb7fcd7c0ad84608bf80431176ade8583212a64d..c1e9ea621f41365137b898011442eb3ebb5a595d 100644 (file)
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
        for (i = 0; i < exynos->num_clks; i++) {
                ret = clk_prepare_enable(exynos->clks[i]);
                if (ret) {
-                       while (--i > 0)
+                       while (i-- > 0)
                                clk_disable_unprepare(exynos->clks[i]);
                        return ret;
                }
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
        for (i = 0; i < exynos->num_clks; i++) {
                ret = clk_prepare_enable(exynos->clks[i]);
                if (ret) {
-                       while (--i > 0)
+                       while (i-- > 0)
                                clk_disable_unprepare(exynos->clks[i]);
                        return ret;
                }
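Editor's note: both dwc3-exynos hunks fix the same off-by-one in the error-path unwind: `while (--i > 0)` never reaches index 0, so the first clock stayed enabled whenever a later clk_prepare_enable() failed, while `while (i-- > 0)` walks all the way back down. A standalone sketch with hypothetical enable/disable helpers standing in for the clk API:

#include <stdio.h>

#define NUM_CLKS 4

static int enabled[NUM_CLKS];

/* hypothetical stand-ins for clk_prepare_enable()/clk_disable_unprepare() */
static int enable_clk(int i)
{
    if (i == 2)            /* pretend the third clock fails */
        return -1;
    enabled[i] = 1;
    return 0;
}

static void disable_clk(int i)
{
    enabled[i] = 0;
}

int main(void)
{
    int i;

    for (i = 0; i < NUM_CLKS; i++) {
        if (enable_clk(i)) {
            /* buggy unwind "while (--i > 0)" would leave enabled[0] set */
            while (i-- > 0)         /* fixed form: also disables index 0 */
                disable_clk(i);
            break;
        }
    }

    for (i = 0; i < NUM_CLKS; i++)
        printf("clk%d enabled=%d\n", i, enabled[i]);
    return 0;
}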
index bed2ff42780b79dd2cdbc166c564e7fcb786d60a..6c9b76bcc2e173570b7fdae29769d4925b3c971d 100644 (file)
@@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
        unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
        unsigned int rem = length % maxp;
 
-       if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
+       if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
                struct dwc3     *dwc = dep->dwc;
                struct dwc3_trb *trb;
 
index 660878a195055b32568db74ca3716b9fbccbae12..b77f3126580ebb937986e7ced5b28739dd25dea4 100644 (file)
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
 #if defined(PLX_PCI_RDK2)
        /* see if PCI int for us by checking irqstat */
        intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
-       if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+       if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
                spin_unlock(&dev->lock);
                return IRQ_NONE;
        }
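Editor's note: the net2272 hunk fixes a precedence bug; `!` binds tighter than `&`, so `!intcsr & (1 << NET2272_PCI_IRQ)` tested `(!intcsr)` against the mask and the "not our interrupt" bail-out almost never fired. A small standalone demonstration (the bit position and register value are invented):

#include <stdio.h>

#define IRQ_BIT 3   /* made-up bit position standing in for NET2272_PCI_IRQ */

int main(void)
{
    unsigned int intcsr = 1u << 1;   /* some other status bit set, ours is not */

    /* buggy form: parsed as (!intcsr) & (1 << IRQ_BIT), which is 0 here,
     * so the "not ours, bail out" branch is never taken */
    if (!intcsr & (1 << IRQ_BIT))
        printf("buggy test: not our interrupt\n");
    else
        printf("buggy test: claims the interrupt is ours (wrong)\n");

    /* fixed form: tests the bit, then negates the result */
    if (!(intcsr & (1 << IRQ_BIT)))
        printf("fixed test: not our interrupt (correct)\n");
    else
        printf("fixed test: our interrupt\n");

    return 0;
}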
index eae8b1b1b45b864a545827b9c5403b88b8779f78..ffe462a657b15d8b592ac7801dc6f4be4629c86f 100644 (file)
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
        }
 
        if (request) {
-               u8      is_dma = 0;
-               bool    short_packet = false;
 
                trace_musb_req_tx(req);
 
                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
-                       is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                 */
                if ((request->zero && request->length)
                        && (request->length % musb_ep->packet_sz == 0)
-                       && (request->actual == request->length))
-                               short_packet = true;
+                       && (request->actual == request->length)) {
 
-               if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
-                       (is_dma && (!dma->desired_mode ||
-                               (request->actual &
-                                       (musb_ep->packet_sz - 1)))))
-                               short_packet = true;
-
-               if (short_packet) {
                        /*
                         * On DMA completion, FIFO may not be
                         * available yet...
index a688f7f87829f760c9fdee9008f6ca365e342dd3..5fc6825745f21bd6246d71b99fd98d7313396f01 100644 (file)
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
                                channel->status = MUSB_DMA_STATUS_FREE;
 
                                /* completed */
-                               if ((devctl & MUSB_DEVCTL_HM)
-                                       && (musb_channel->transmit)
-                                       && ((channel->desired_mode == 0)
-                                           || (channel->actual_len &
-                                           (musb_channel->max_packet_sz - 1)))
-                                   ) {
+                               if (musb_channel->transmit &&
+                                       (!channel->desired_mode ||
+                                       (channel->actual_len %
+                                           musb_channel->max_packet_sz))) {
                                        u8  epnum  = musb_channel->epnum;
                                        int offset = musb->io.ep_offset(epnum,
                                                                    MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
                                         */
                                        musb_ep_select(mbase, epnum);
                                        txcsr = musb_readw(mbase, offset);
-                                       txcsr &= ~(MUSB_TXCSR_DMAENAB
+                                       if (channel->desired_mode == 1) {
+                                               txcsr &= ~(MUSB_TXCSR_DMAENAB
                                                        | MUSB_TXCSR_AUTOSET);
-                                       musb_writew(mbase, offset, txcsr);
-                                       /* Send out the packet */
-                                       txcsr &= ~MUSB_TXCSR_DMAMODE;
+                                               musb_writew(mbase, offset, txcsr);
+                                               /* Send out the packet */
+                                               txcsr &= ~MUSB_TXCSR_DMAMODE;
+                                               txcsr |= MUSB_TXCSR_DMAENAB;
+                                       }
                                        txcsr |=  MUSB_TXCSR_TXPKTRDY;
                                        musb_writew(mbase, offset, txcsr);
                                }
index d7312eed60882a7c356b0d6736c5df836d2dbaf0..91ea3083e7ad97793872fbb9459ab205fd9b5161 100644 (file)
@@ -21,7 +21,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
        depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
index 27bdb72225272394d1d1001dc8d50e17c0186601..f5f0568d8533e111a6d1287d0ef39916005efbb3 100644 (file)
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
-       if (ret)
-               return ret;
        am_phy->usb_phy_gen.phy.init = am335x_init;
        am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
 
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
        device_set_wakeup_enable(dev, false);
        phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
 
-       return 0;
+       return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
 }
 
 static int am335x_phy_remove(struct platform_device *pdev)
index 4bc29b5866980ffa3486516670b4bf15812dd21a..f1c39a3c753434666b033e1ab47fb1033b5fd259 100644 (file)
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
                                              pdo_pps_apdo_max_voltage(snk));
                port->pps_data.max_curr = min_pps_apdo_current(src, snk);
                port->pps_data.out_volt = min(port->pps_data.max_volt,
-                                             port->pps_data.out_volt);
+                                             max(port->pps_data.min_volt,
+                                                 port->pps_data.out_volt));
                port->pps_data.op_curr = min(port->pps_data.max_curr,
                                             port->pps_data.op_curr);
        }
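Editor's note: the tcpm hunk turns a one-sided min() into a proper clamp, so the requested PPS output voltage can no longer fall below the APDO minimum. An equivalent userspace sketch of the clamp idiom (the voltages are illustrative millivolt values):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
    unsigned int min_volt = 3300, max_volt = 11000;
    unsigned int out_volt = 3000;                    /* below the PPS minimum */

    /* old code: only an upper bound, so 3000 mV passed through unchanged */
    unsigned int old = min_u(max_volt, out_volt);

    /* new code: clamp into [min_volt, max_volt] */
    unsigned int new = min_u(max_volt, max_u(min_volt, out_volt));

    printf("old=%u new=%u\n", old, new);             /* old=3000, new=3300 */
    return 0;
}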
index cd7e755484e3be873cbcc73369a2ddadf1c4f376..a0b07c3312550463b689acf9aa8d524ac5ad7d89 100644 (file)
@@ -152,7 +152,12 @@ struct vring_virtqueue {
                /* Available for packed ring */
                struct {
                        /* Actual memory layout for this queue. */
-                       struct vring_packed vring;
+                       struct {
+                               unsigned int num;
+                               struct vring_packed_desc *desc;
+                               struct vring_packed_desc_event *driver;
+                               struct vring_packed_desc_event *device;
+                       } vring;
 
                        /* Driver ring wrap counter. */
                        bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
                !context;
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
+       if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+               vq->weak_barriers = false;
+
        vq->packed.ring_dma_addr = ring_dma_addr;
        vq->packed.driver_event_dma_addr = driver_event_dma_addr;
        vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
                !context;
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
 
+       if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
+               vq->weak_barriers = false;
+
        vq->split.queue_dma_addr = 0;
        vq->split.queue_size_in_bytes = 0;
 
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
                        break;
                case VIRTIO_F_RING_PACKED:
                        break;
+               case VIRTIO_F_ORDER_PLATFORM:
+                       break;
                default:
                        /* We don't understand this bit. */
                        __virtio_clear_bit(vdev, i);
index b906ff70c90f809ba1d7a4e6f38019c0df4da4e8..aaaaf4d12c7394fe1bb9836516b7b7fc1eb69105 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
        if (unlikely(!req->ki_filp))
                return -EBADF;
        req->ki_complete = aio_complete_rw;
+       req->private = NULL;
        req->ki_pos = iocb->aio_offset;
        req->ki_flags = iocb_flags(req->ki_filp);
        if (iocb->aio_flags & IOCB_FLAG_RESFD)
index 52d024bfdbc12142a97ff8ec204ce4b658df6fe6..48318fb74938a458184c7b77598994915474ade7 100644 (file)
@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        struct buffer_head *head;
        struct page *page;
        int all_mapped = 1;
+       static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
 
        index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
         * file io on the block device and getblk.  It gets dealt with
         * elsewhere, don't buffer_error if we had some unmapped buffers
         */
-       if (all_mapped) {
-               printk("__find_get_block_slow() failed. "
-                       "block=%llu, b_blocknr=%llu\n",
-                       (unsigned long long)block,
-                       (unsigned long long)bh->b_blocknr);
-               printk("b_state=0x%08lx, b_size=%zu\n",
-                       bh->b_state, bh->b_size);
-               printk("device %pg blocksize: %d\n", bdev,
-                       1 << bd_inode->i_blkbits);
+       ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+       if (all_mapped && __ratelimit(&last_warned)) {
+               printk("__find_get_block_slow() failed. block=%llu, "
+                      "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+                      "device %pg blocksize: %d\n",
+                      (unsigned long long)block,
+                      (unsigned long long)bh->b_blocknr,
+                      bh->b_state, bh->b_size, bdev,
+                      1 << bd_inode->i_blkbits);
        }
 out_unlock:
        spin_unlock(&bd_mapping->private_lock);
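Editor's note: the buffer.c hunk collapses three printk() calls into one and rate-limits it so a misbehaving block device cannot flood the log. A toy model of the one-message-per-window idea (this is not the kernel's __ratelimit() implementation, just the shape of it):

#include <stdio.h>
#include <time.h>

/* toy ratelimit state: at most 'burst' messages per 'interval' seconds */
struct ratelimit {
    time_t begin;
    int interval;
    int burst;
    int printed;
};

static int ratelimit_ok(struct ratelimit *rs)
{
    time_t now = time(NULL);

    if (now - rs->begin >= rs->interval) {
        rs->begin = now;
        rs->printed = 0;
    }
    if (rs->printed < rs->burst) {
        rs->printed++;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct ratelimit rs = { .interval = 1, .burst = 1 };
    int i;

    /* only the first message in each one-second window gets through */
    for (i = 0; i < 5; i++)
        if (ratelimit_ok(&rs))
            printf("warning %d: lookup failed\n", i);
    return 0;
}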
index 13b01351dd1cb3f8381dbddd2ee70717792b9ac7..29c68c5d44d5f179c17676c83be04ed1a3ade190 100644 (file)
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
        inode_unlock(d_inode(dentry->d_parent));
        dput(dentry);
        simple_release_fs(&debugfs_mount, &debugfs_mount_count);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 static struct dentry *end_creating(struct dentry *dentry)
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
        dentry = start_creating(name, parent);
 
        if (IS_ERR(dentry))
-               return NULL;
+               return dentry;
 
        inode = debugfs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode,
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %NULL will be returned.
+ * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %NULL will be returned.
+ * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the file is
  * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.)  If an error occurs, %NULL will be returned.
+ * you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR) will be
+ * returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
        struct inode *inode;
 
        if (IS_ERR(dentry))
-               return NULL;
+               return dentry;
 
        inode = debugfs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name,
        struct inode *inode;
 
        if (IS_ERR(dentry))
-               return NULL;
+               return dentry;
 
        inode = debugfs_get_inode(dentry->d_sb);
        if (unlikely(!inode))
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount);
  * This function will return a pointer to a dentry if it succeeds.  This
  * pointer must be passed to the debugfs_remove() function when the symbolic
  * link is to be removed (no automatic cleanup happens if your module is
- * unloaded, you are responsible here.)  If an error occurs, %NULL will be
- * returned.
+ * unloaded, you are responsible here.)  If an error occurs, %ERR_PTR(-ERROR)
+ * will be returned.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
        struct inode *inode;
        char *link = kstrdup(target, GFP_KERNEL);
        if (!link)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        dentry = start_creating(name, parent);
        if (IS_ERR(dentry)) {
                kfree(link);
-               return NULL;
+               return dentry;
        }
 
        inode = debugfs_get_inode(dentry->d_sb);
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
        struct dentry *dentry = NULL, *trap;
        struct name_snapshot old_name;
 
+       if (IS_ERR(old_dir))
+               return old_dir;
+       if (IS_ERR(new_dir))
+               return new_dir;
+       if (IS_ERR_OR_NULL(old_dentry))
+               return old_dentry;
+
        trap = lock_rename(new_dir, old_dir);
        /* Source or destination directories don't exist? */
        if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
@@ -820,7 +830,9 @@ exit:
        if (dentry && !IS_ERR(dentry))
                dput(dentry);
        unlock_rename(new_dir, old_dir);
-       return NULL;
+       if (IS_ERR(dentry))
+               return dentry;
+       return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(debugfs_rename);
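Editor's note: the debugfs hunks above move the error convention from returning NULL to returning ERR_PTR(-ERROR), which means callers must now check with IS_ERR() instead of a NULL test. A minimal userspace model of that convention (the ERR_PTR/IS_ERR/PTR_ERR helpers below are simplified stand-ins for the kernel macros in <linux/err.h>):

#include <stdio.h>
#include <errno.h>

/* simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* a create helper in the new style: never returns NULL on failure */
static void *create_node(int fail)
{
    static int node = 42;

    if (fail)
        return ERR_PTR(-ENOMEM);
    return &node;
}

int main(void)
{
    void *d = create_node(1);

    if (IS_ERR(d))                       /* the check callers now need */
        printf("create failed: %ld\n", PTR_ERR(d));
    else
        printf("created node %d\n", *(int *)d);
    return 0;
}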
 
index a5e516a40e7a359cdae8b2bf289175e971b6e6c9..809c0f2f9942e372d67ddd7e45e6cee2b344414b 100644 (file)
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
-       req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;
 
        index = outarg->offset >> PAGE_SHIFT;
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                this_num = min_t(unsigned, num, PAGE_SIZE - offset);
                req->pages[req->num_pages] = page;
+               req->page_descs[req->num_pages].offset = offset;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;
 
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 
        ret = fuse_dev_do_write(fud, &cs, len);
 
+       pipe_lock(pipe);
        for (idx = 0; idx < nbuf; idx++)
                pipe_buf_release(pipe, &bufs[idx]);
+       pipe_unlock(pipe);
 
 out:
        kvfree(bufs);
index ffaffe18352a1bb53708f3ac2c0f2638a24337ab..a59c16bd90accaa2f49d924fd232dfd265be1f98 100644 (file)
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
                spin_unlock(&fc->lock);
 
                dec_wb_stat(&bdi->wb, WB_WRITEBACK);
-               dec_node_page_state(page, NR_WRITEBACK_TEMP);
+               dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
                wb_writeout_inc(&bdi->wb);
                fuse_writepage_free(fc, new_req);
                fuse_request_free(new_req);
index 76baaa6be3934eef20c1c2a327a4f3b10c9bffed..c2d4099429be0d73c32c87a82dc1a939e89d009b 100644 (file)
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
        get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
        fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
        fc->user_ns = get_user_ns(user_ns);
+       fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
 }
 EXPORT_SYMBOL_GPL(fuse_conn_init);
 
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
        fc->max_read = max_t(unsigned, 4096, d.max_read);
-       fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
 
        /* Used by get_root_inode() */
        sb->s_fs_info = fc;
index 9824e32b2f2345312dad32ea207885344313fbb8..7dc98e14655df9af314cfec2f3df1ba3fbb32d10 100644 (file)
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
        loff_t cloned;
 
        cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+       if (cloned < 0)
+               return nfserrno(cloned);
        if (count && cloned != count)
-               cloned = -EINVAL;
-       return nfserrno(cloned < 0 ? cloned : 0);
+               return nfserrno(-EINVAL);
+       return 0;
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
index 1c8eecfe52b85688b48fd2507ceeeee18fe64878..6acf1bfa0bfee57e6d5f105e47797c4126d9d88b 100644 (file)
@@ -768,18 +768,23 @@ xrep_findroot_block(
                if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
                                &mp->m_sb.sb_meta_uuid))
                        goto out;
+               /*
+                * Read verifiers can reference b_ops, so we set the pointer
+                * here.  If the verifier fails we'll reset the buffer state
+                * to what it was before we touched the buffer.
+                */
+               bp->b_ops = fab->buf_ops;
                fab->buf_ops->verify_read(bp);
                if (bp->b_error) {
+                       bp->b_ops = NULL;
                        bp->b_error = 0;
                        goto out;
                }
 
                /*
                 * Some read verifiers will (re)set b_ops, so we must be
-                * careful not to blow away any such assignment.
+                * careful not to change b_ops after running the verifier.
                 */
-               if (!bp->b_ops)
-                       bp->b_ops = fab->buf_ops;
        }
 
        /*
index 338b9d9984e04a62d790ae72638017c9ac3329d5..d9048bcea49c5203c6d89186637d63fb33f69c37 100644 (file)
@@ -449,6 +449,7 @@ xfs_map_blocks(
        }
 
        wpc->imap = imap;
+       xfs_trim_extent_eof(&wpc->imap, ip);
        trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
        return 0;
 allocate_blocks:
@@ -459,6 +460,7 @@ allocate_blocks:
        ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
               imap.br_startoff + imap.br_blockcount <= cow_fsb);
        wpc->imap = imap;
+       xfs_trim_extent_eof(&wpc->imap, ip);
        trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
        return 0;
 }
index eedc5e0156ff1cacaf33f18cc3a34a03e6c94311..4f5f2ff3f70f944130f94a674f09f464d2b6c970 100644 (file)
@@ -776,10 +776,26 @@ _xfs_buf_read(
 }
 
 /*
+ * Set buffer ops on an unchecked buffer and validate it, if possible.
+ *
  * If the caller passed in an ops structure and the buffer doesn't have ops
  * assigned, set the ops and use them to verify the contents.  If the contents
  * cannot be verified, we'll clear XBF_DONE.  We assume the buffer has no
  * recorded errors and is already in XBF_DONE state.
+ *
+ * Under normal operations, every in-core buffer must have buffer ops assigned
+ * to them when the buffer is read in from disk so that we can validate the
+ * metadata.
+ *
+ * However, there are two scenarios where one can encounter in-core buffers
+ * that don't have buffer ops.  The first is during log recovery of buffers on
+ * a V4 filesystem, though these buffers are purged at the end of recovery.
+ *
+ * The other is online repair, which tries to match arbitrary metadata blocks
+ * with btree types in order to find the root.  If online repair doesn't match
+ * the buffer with /any/ btree type, the buffer remains in memory in DONE state
+ * with no ops, and a subsequent read_buf call from elsewhere will not set the
+ * ops.  This function helps us fix this situation.
  */
 int
 xfs_buf_ensure_ops(
@@ -1536,8 +1552,7 @@ __xfs_buf_submit(
                xfs_buf_ioerror(bp, -EIO);
                bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
-               if (bp->b_flags & XBF_ASYNC)
-                       xfs_buf_ioend(bp);
+               xfs_buf_ioend(bp);
                return -EIO;
        }
 
index b53be41929bec766c4a0c4dc564802a893f89e64..04f7ac345984fa1752529d15b1205505f17a390f 100644 (file)
 #define IMX8MQ_CLK_VPU_G2_ROOT                 241
 
 /* SCCG PLL GATE */
-#define IMX8MQ_SYS1_PLL_OUT                    232
+#define IMX8MQ_SYS1_PLL_OUT                    242
 #define IMX8MQ_SYS2_PLL_OUT                    243
 #define IMX8MQ_SYS3_PLL_OUT                    244
 #define IMX8MQ_DRAM_PLL_OUT                    245
 /* txesc clock */
 #define IMX8MQ_CLK_DSI_IPG_DIV                  256
 
-#define IMX8MQ_CLK_TMU_ROOT                    265
+#define IMX8MQ_CLK_TMU_ROOT                    257
 
 /* Display root clocks */
-#define IMX8MQ_CLK_DISP_AXI_ROOT               266
-#define IMX8MQ_CLK_DISP_APB_ROOT               267
-#define IMX8MQ_CLK_DISP_RTRM_ROOT              268
+#define IMX8MQ_CLK_DISP_AXI_ROOT               258
+#define IMX8MQ_CLK_DISP_APB_ROOT               259
+#define IMX8MQ_CLK_DISP_RTRM_ROOT              260
 
-#define IMX8MQ_CLK_OCOTP_ROOT                  269
+#define IMX8MQ_CLK_OCOTP_ROOT                  261
 
-#define IMX8MQ_CLK_DRAM_ALT_ROOT               270
-#define IMX8MQ_CLK_DRAM_CORE                   271
+#define IMX8MQ_CLK_DRAM_ALT_ROOT               262
+#define IMX8MQ_CLK_DRAM_CORE                   263
 
-#define IMX8MQ_CLK_MU_ROOT                     272
-#define IMX8MQ_VIDEO2_PLL_OUT                  273
+#define IMX8MQ_CLK_MU_ROOT                     264
+#define IMX8MQ_VIDEO2_PLL_OUT                  265
 
-#define IMX8MQ_CLK_CLKO2                       274
+#define IMX8MQ_CLK_CLKO2                       266
 
-#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK  275
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK  267
 
-#define IMX8MQ_CLK_END                         276
+#define IMX8MQ_CLK_END                         268
 #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
index 8804753805ac554f3b0590b19d43f2509a7aea2c..7bb2d8de9f308e367bcda4a5484f67483f8fc8e5 100644 (file)
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
 
 static inline sector_t blk_rq_trace_sector(struct request *rq)
 {
-       return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq);
+       /*
+        * Tracing should ignore starting sector for passthrough requests and
+        * requests where starting sector didn't get set.
+        */
+       if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)
+               return 0;
+       return blk_rq_pos(rq);
 }
 
 static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq)
index ad106d845b2290a106765b96cab3cdd555dc9211..e532fcc6e4b5a19c0633c7c4d027205b5a3abdc7 100644 (file)
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
        return qdisc_skb_cb(skb)->data;
 }
 
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
-                                      struct sk_buff *skb)
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+                                        struct sk_buff *skb)
 {
        u8 *cb_data = bpf_skb_cb(skb);
        u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
        return res;
 }
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+                                      struct sk_buff *skb)
+{
+       u32 res;
+
+       preempt_disable();
+       res = __bpf_prog_run_save_cb(prog, skb);
+       preempt_enable();
+       return res;
+}
+
 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
                                        struct sk_buff *skb)
 {
        u8 *cb_data = bpf_skb_cb(skb);
+       u32 res;
 
        if (unlikely(prog->cb_access))
                memset(cb_data, 0, BPF_SKB_CB_LEN);
 
-       return BPF_PROG_RUN(prog, skb);
+       preempt_disable();
+       res = BPF_PROG_RUN(prog, skb);
+       preempt_enable();
+       return res;
 }
 
 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
index 8663f216c563e9023df067c83ec9b6e5fbcbe02b..2d6100edf2049d6195b13f03729ac1165f8c192b 100644 (file)
 
 #ifdef CONFIG_DEBUG_FS
 
+#include <linux/kfifo.h>
+
 #define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
 
 void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
 void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
 void hid_debug_exit(void);
 void hid_debug_event(struct hid_device *, char *);
 
-
 struct hid_debug_list {
-       char *hid_debug_buf;
-       int head;
-       int tail;
+       DECLARE_KFIFO_PTR(hid_debug_fifo, char);
        struct fasync_struct *fasync;
        struct hid_device *hdev;
        struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
 #endif
 
 #endif
-
index 071b4cbdf0104796d962446a759beea76790ff28..c848a7cc502ee54ac1aea1452a23b41c6fb247af 100644 (file)
 #define GITS_TYPER_PLPIS               (1UL << 0)
 #define GITS_TYPER_VLPIS               (1UL << 1)
 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT        4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r)   ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE(r)   ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
 #define GITS_TYPER_IDBITS_SHIFT                8
 #define GITS_TYPER_DEVBITS_SHIFT       13
 #define GITS_TYPER_DEVBITS(r)          ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
index 1377d085ef99d64bd2eb07b93f57bb07ef7986ee..86dbb3e29139591d046602003cd7cb115f8b7e1a 100644 (file)
@@ -1483,6 +1483,7 @@ struct net_device_ops {
  * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
  * @IFF_FAILOVER: device is a failover master device
  * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags {
        IFF_NO_RX_HANDLER               = 1<<26,
        IFF_FAILOVER                    = 1<<27,
        IFF_FAILOVER_SLAVE              = 1<<28,
+       IFF_L3MDEV_RX_HANDLER           = 1<<29,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags {
 #define IFF_NO_RX_HANDLER              IFF_NO_RX_HANDLER
 #define IFF_FAILOVER                   IFF_FAILOVER
 #define IFF_FAILOVER_SLAVE             IFF_FAILOVER_SLAVE
+#define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
 
 /**
  *     struct net_device - The DEVICE structure.
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
        return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+       return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
        return dev->priv_flags & IFF_L3MDEV_MASTER;
index cc7e2c1cd44465926c3626a014dbd051319c2a64..9702016734b1ad7e53b204511b1167e05bc6b858 100644 (file)
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig);
 #endif
 
 #define siginmask(sig, mask) \
-       ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
+       ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask)))
 
 #define SIG_KERNEL_ONLY_MASK (\
        rt_sigmask(SIGKILL)   |  rt_sigmask(SIGSTOP))
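Editor's note: the siginmask() hunk adds a `(sig) > 0` guard because rt_sigmask(sig) expands to `1UL << (sig - 1)`, which is undefined for sig == 0 or negative values even when the upper-bound check passes. A standalone sketch of the guarded macro (RT_MIN and the signal numbers are illustrative):

#include <stdio.h>

#define RT_MIN 32                                   /* stand-in for SIGRTMIN */
#define rt_sigmask(sig) (1UL << ((sig) - 1))        /* undefined for sig <= 0 */

/* guarded form from the hunk above: short-circuits before shifting */
#define siginmask(sig, mask) \
        ((sig) > 0 && (sig) < RT_MIN && (rt_sigmask(sig) & (mask)))

int main(void)
{
    unsigned long mask = rt_sigmask(9) | rt_sigmask(19);   /* e.g. KILL|STOP */

    /* the unguarded macro would have evaluated rt_sigmask(0) here, i.e. a
     * shift by -1 (undefined behaviour); the guard makes sig == 0 simply false */
    printf("sig=0  -> %d\n", (int)siginmask(0, mask));
    printf("sig=9  -> %d\n", (int)siginmask(9, mask));
    printf("sig=20 -> %d\n", (int)siginmask(20, mask));
    return 0;
}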
index 7ddfc65586b046e80e3ae7357368b696b406507f..4335bd771ce57b40776bcaf875f52df09c8e6233 100644 (file)
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
        struct clk *pclk;
        struct clk *clk_ptp_ref;
        unsigned int clk_ptp_rate;
+       unsigned int clk_ref_rate;
        struct reset_control *stmmac_rst;
        struct stmmac_axi *axi;
        int has_gmac4;
index 78fa0ac4613c3946590677f53d946d37d97f7bde..5175fd63cd8278fcb9e54cf184b10d6d86bdb528 100644 (file)
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
 
        if (netif_is_l3_slave(skb->dev))
                master = netdev_master_upper_dev_get_rcu(skb->dev);
-       else if (netif_is_l3_master(skb->dev))
+       else if (netif_is_l3_master(skb->dev) ||
+                netif_has_l3_rx_handler(skb->dev))
                master = skb->dev;
 
        if (master && master->l3mdev_ops->l3mdev_l3_rcv)
index 841835a387e17849155ae85f63b3197e747f3469..b4984bbbe15713da8c0fb7f6c58c6c7ea1520f00 100644 (file)
@@ -469,9 +469,7 @@ struct nft_set_binding {
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding);
-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding);
+                         struct nft_set_binding *binding, bool commit);
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
 
 /**
@@ -721,6 +719,13 @@ struct nft_expr_type {
 #define NFT_EXPR_STATEFUL              0x1
 #define NFT_EXPR_GC                    0x2
 
+enum nft_trans_phase {
+       NFT_TRANS_PREPARE,
+       NFT_TRANS_ABORT,
+       NFT_TRANS_COMMIT,
+       NFT_TRANS_RELEASE
+};
+
 /**
  *     struct nft_expr_ops - nf_tables expression operations
  *
@@ -750,7 +755,8 @@ struct nft_expr_ops {
        void                            (*activate)(const struct nft_ctx *ctx,
                                                    const struct nft_expr *expr);
        void                            (*deactivate)(const struct nft_ctx *ctx,
-                                                     const struct nft_expr *expr);
+                                                     const struct nft_expr *expr,
+                                                     enum nft_trans_phase phase);
        void                            (*destroy)(const struct nft_ctx *ctx,
                                                   const struct nft_expr *expr);
        void                            (*destroy_clone)(const struct nft_ctx *ctx,
@@ -1323,12 +1329,15 @@ struct nft_trans_rule {
 struct nft_trans_set {
        struct nft_set                  *set;
        u32                             set_id;
+       bool                            bound;
 };
 
 #define nft_trans_set(trans)   \
        (((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)        \
        (((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans)     \
+       (((struct nft_trans_set *)trans->data)->bound)
 
 struct nft_trans_chain {
        bool                            update;
index 0cdc3999ecfa84ebb289eca932b33d1806c2fa13..c5188ff724d1220b991e7e2c1591f8f396138796 100644 (file)
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       if (stream->direction == SND_COMPRESS_PLAYBACK)
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       else
+               stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
        wake_up(&stream->runtime->sleep);
 }
 
index 7fa48b100936183464d17c9eb3c42711d28336be..cc7c8d42d4fd9a8a494f6b0cf59d48a8fbf2f902 100644 (file)
@@ -68,6 +68,7 @@ struct hda_bus {
        unsigned int response_reset:1;  /* controller was reset */
        unsigned int in_reset:1;        /* during reset operation */
        unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+       unsigned int bus_probing :1;    /* during probing process */
 
        int primary_dig_out_type;       /* primary digital out PCM type */
        unsigned int mixer_assigned;    /* codec addr for mixer name */
index 1196e1c1d4f6c494ee95b8d679e90a7def6d66b4..ff8e7dc9d4dd22f4ce6f98b77fe34142bf2ec63b 100644 (file)
 /* This feature indicates support for the packed virtqueue layout. */
 #define VIRTIO_F_RING_PACKED           34
 
+/*
+ * This feature indicates that memory accesses by the driver and the
+ * device are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM                36
+
 /*
  * Does the device support Single Root I/O Virtualization?
  */
index 2414f8af26b3ee882be9acc96fa273b33befa317..4c4e24c291a5b0f7c44453b3d7bb9630246ed81f 100644 (file)
@@ -213,14 +213,4 @@ struct vring_packed_desc {
        __le16 flags;
 };
 
-struct vring_packed {
-       unsigned int num;
-
-       struct vring_packed_desc *desc;
-
-       struct vring_packed_desc_event *driver;
-
-       struct vring_packed_desc_event *device;
-};
-
 #endif /* _UAPI_LINUX_VIRTIO_RING_H */
index befe570be5ba27514e2be51609d3a53db06f07c7..c57bd10340ed8dd9484ce716835b87a68099b481 100644 (file)
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
 
                /* "typedef void new_void", "const void"...etc */
                if (!btf_type_is_void(next_type) &&
-                   !btf_type_is_fwd(next_type)) {
+                   !btf_type_is_fwd(next_type) &&
+                   !btf_type_is_func_proto(next_type)) {
                        btf_verifier_log_type(env, v->t, "Invalid type_id");
                        return -EINVAL;
                }
index ab612fe9862f0668b566722c413d366d598f10ca..d17d05570a3fd4eead90b4806362b66b6cb8bea4 100644 (file)
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
        bpf_compute_and_save_data_end(skb, &saved_data_end);
 
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
-                                bpf_prog_run_save_cb);
+                                __bpf_prog_run_save_cb);
        bpf_restore_data_end(skb, saved_data_end);
        __skb_pull(skb, offset);
        skb->sk = save_sk;
index 4b7c76765d9d6998cdbb273ced0d30770d3f6b1e..f9274114c88d37d889d0337dd1f3247d6f40a7e2 100644 (file)
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
        }
 
        if (htab_is_prealloc(htab)) {
-               pcpu_freelist_push(&htab->freelist, &l->fnode);
+               __pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
                l->htab = htab;
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                } else {
                        struct pcpu_freelist_node *l;
 
-                       l = pcpu_freelist_pop(&htab->freelist);
+                       l = __pcpu_freelist_pop(&htab->freelist);
                        if (!l)
                                return ERR_PTR(-E2BIG);
                        l_new = container_of(l, struct htab_elem, fnode);
index 673fa6fe2d73cf815daf8a0c2eb0ce6eab15b41e..0c1b4ba9e90e755f7cef80e1267ed2c9557d5354 100644 (file)
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
        free_percpu(s->freelist);
 }
 
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-                                       struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+                                        struct pcpu_freelist_node *node)
 {
        raw_spin_lock(&head->lock);
        node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
        raw_spin_unlock(&head->lock);
 }
 
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
                        struct pcpu_freelist_node *node)
 {
        struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
 
-       __pcpu_freelist_push(head, node);
+       ___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+                       struct pcpu_freelist_node *node)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __pcpu_freelist_push(s, node);
+       local_irq_restore(flags);
 }
 
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
        for_each_possible_cpu(cpu) {
 again:
                head = per_cpu_ptr(s->freelist, cpu);
-               __pcpu_freelist_push(head, buf);
+               ___pcpu_freelist_push(head, buf);
                i++;
                buf += elem_size;
                if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
        local_irq_restore(flags);
 }
 
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
        struct pcpu_freelist_head *head;
        struct pcpu_freelist_node *node;
-       unsigned long flags;
        int orig_cpu, cpu;
 
-       local_irq_save(flags);
        orig_cpu = cpu = raw_smp_processor_id();
        while (1) {
                head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
                node = head->first;
                if (node) {
                        head->first = node->next;
-                       raw_spin_unlock_irqrestore(&head->lock, flags);
+                       raw_spin_unlock(&head->lock);
                        return node;
                }
                raw_spin_unlock(&head->lock);
                cpu = cpumask_next(cpu, cpu_possible_mask);
                if (cpu >= nr_cpu_ids)
                        cpu = 0;
-               if (cpu == orig_cpu) {
-                       local_irq_restore(flags);
+               if (cpu == orig_cpu)
                        return NULL;
-               }
        }
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+       struct pcpu_freelist_node *ret;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       ret = __pcpu_freelist_pop(s);
+       local_irq_restore(flags);
+       return ret;
+}
index 3049aae8ea1e2214b888ee40b550d7646da63752..c3960118e617881644f1c726796cc1071face893 100644 (file)
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
        struct pcpu_freelist_node *next;
 };
 
+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
                            u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
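Editor's note: the pcpu_freelist hunks split each operation into a lock-only `__pcpu_freelist_*` variant and a public wrapper that adds local_irq_save()/restore(), so callers already running with IRQs disabled do not pay for it twice. A toy userspace sketch of that two-level calling convention (a mutex and a flag stand in for the raw spinlock and the IRQ state; this only models the contract, not the per-CPU list):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t head_lock = PTHREAD_MUTEX_INITIALIZER;
static int irqs_disabled;

struct node { struct node *next; };
static struct node *head;

static void local_irq_save(void)    { irqs_disabled = 1; }
static void local_irq_restore(void) { irqs_disabled = 0; }

/* __push(): takes the lock only; the caller must already have "irqs" off */
static void __push(struct node *n)
{
    if (!irqs_disabled)
        fprintf(stderr, "__push() called with irqs enabled\n");
    pthread_mutex_lock(&head_lock);
    n->next = head;
    head = n;
    pthread_mutex_unlock(&head_lock);
}

/* push(): public wrapper that handles the irq state itself */
static void push(struct node *n)
{
    local_irq_save();
    __push(n);
    local_irq_restore();
}

int main(void)
{
    static struct node a, b;

    push(&a);              /* ordinary caller */

    local_irq_save();      /* caller already inside an irq-off section... */
    __push(&b);            /* ...so it uses the double-underscore variant */
    local_irq_restore();

    printf("top of stack is %s\n", head == &b ? "b" : "a");
    return 0;
}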
index b155cd17c1bd77d6c40e215a09ef2af37bd2a077..8577bb7f8be6739f143667af19f916b04de01126 100644 (file)
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr)
 
        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_lookup_elem(map, key, value);
-       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-                  map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+               goto done;
+       }
+
+       preempt_disable();
+       this_cpu_inc(bpf_prog_active);
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
                err = bpf_percpu_hash_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
                err = bpf_percpu_array_copy(map, key, value);
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr)
                }
                rcu_read_unlock();
        }
+       this_cpu_dec(bpf_prog_active);
+       preempt_enable();
 
+done:
        if (err)
                goto free_value;
 
index 4a99370763319dd14d39c7547e4af930779dc38f..309ef5a64af5d03daa402749ea12fc12385ec4be 100644 (file)
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);
 
+       if (order_base_2(size) >= MAX_ORDER)
+               goto fail;
+
        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;
index fdd312da09927ad43e2b6dabd7b089d3c2e8393b..a0514e01c3eb0c87ecb854c4c19fa0f9a64d6408 100644 (file)
@@ -2221,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
         * decrement the counter at queue_unlock() when some error has
         * occurred and we don't end up adding the task to the list.
         */
-       hb_waiters_inc(hb);
+       hb_waiters_inc(hb); /* implies smp_mb(); (A) */
 
        q->lock_ptr = &hb->lock;
 
-       spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+       spin_lock(&hb->lock);
        return hb;
 }
 
@@ -2861,35 +2861,39 @@ retry_private:
         * and BUG when futex_unlock_pi() interleaves with this.
         *
         * Therefore acquire wait_lock while holding hb->lock, but drop the
-        * latter before calling rt_mutex_start_proxy_lock(). This still fully
-        * serializes against futex_unlock_pi() as that does the exact same
-        * lock handoff sequence.
+        * latter before calling __rt_mutex_start_proxy_lock(). This
+        * interleaves with futex_unlock_pi() -- which does a similar lock
+        * handoff -- such that the latter can observe the futex_q::pi_state
+        * before __rt_mutex_start_proxy_lock() is done.
         */
        raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
        spin_unlock(q.lock_ptr);
+       /*
+        * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+        * such that futex_unlock_pi() is guaranteed to observe the waiter when
+        * it sees the futex_q::pi_state.
+        */
        ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
        raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
 
        if (ret) {
                if (ret == 1)
                        ret = 0;
-
-               spin_lock(q.lock_ptr);
-               goto no_block;
+               goto cleanup;
        }
 
-
        if (unlikely(to))
                hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 
        ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
 
+cleanup:
        spin_lock(q.lock_ptr);
        /*
-        * If we failed to acquire the lock (signal/timeout), we must
+        * If we failed to acquire the lock (deadlock/signal/timeout), we must
         * first acquire the hb->lock before removing the lock from the
-        * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-        * wait lists consistent.
+        * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+        * lists consistent.
         *
         * In particular; it is important that futex_unlock_pi() can not
         * observe this inconsistency.
@@ -3013,6 +3017,10 @@ retry:
                 * there is no point where we hold neither; and therefore
                 * wake_futex_pi() must observe a state consistent with what we
                 * observed.
+                *
+                * In particular; this forces __rt_mutex_start_proxy_lock() to

+                * complete such that we're guaranteed to observe the
+                * rt_waiter. Also see the WARN in wake_futex_pi().
                 */
                raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
                spin_unlock(&hb->lock);
index 581edcc63c2689f52eae98094f984b9fb3846bc8..978d63a8261c265e76eddcc1f933cd714e1b6e66 100644 (file)
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
        rt_mutex_set_owner(lock, NULL);
 }
 
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:              the rt_mutex to take
+ * @waiter:            the pre-initialized rt_mutex_waiter
+ * @task:              the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task)
 {
        int ret;
 
+       lockdep_assert_held(&lock->wait_lock);
+
        if (try_to_take_rt_mutex(lock, task, NULL))
                return 1;
 
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                ret = 0;
        }
 
-       if (unlikely(ret))
-               remove_waiter(lock, waiter);
-
        debug_rt_mutex_print_deadlock(waiter);
 
        return ret;
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
  * @waiter:            the pre-initialized rt_mutex_waiter
  * @task:              the task to prepare
  *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
  * Returns:
  *  0 - task blocked on lock
  *  1 - acquired the lock for task, caller should wake it up
  * <0 - error
  *
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 
        raw_spin_lock_irq(&lock->wait_lock);
        ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+       if (unlikely(ret))
+               remove_waiter(lock, waiter);
        raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
  * @lock:              the rt_mutex we were woken on
  * @waiter:            the pre-initialized rt_mutex_waiter
  *
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
  *
  * Unless we acquired the lock; we're still enqueued on the wait-list and can
  * in fact still be granted ownership until we're removed. Therefore we can
index 04f248644e065f601d78744be2bbe580a6aa1810..9e0f52375487d7be47ee4c265ee0cbd5227186d0 100644 (file)
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
        dentry = chan->cb->create_buf_file(tmpname, chan->parent,
                                           S_IRUSR, buf,
                                           &chan->is_global);
+       if (IS_ERR(dentry))
+               dentry = NULL;
 
        kfree(tmpname);
 
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
                dentry = chan->cb->create_buf_file(NULL, NULL,
                                                   S_IRUSR, buf,
                                                   &chan->is_global);
-               if (WARN_ON(dentry))
+               if (IS_ERR_OR_NULL(dentry))
                        goto free_buf;
        }
 
index e1d7ad8e6ab179835d719d87aa9cb5c87c29339d..99fa8ff06fd940c8742af8046e96a82b78b1b9c6 100644 (file)
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
 }
 EXPORT_SYMBOL_GPL(dequeue_signal);
 
+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
+{
+       struct task_struct *tsk = current;
+       struct sigpending *pending = &tsk->pending;
+       struct sigqueue *q, *sync = NULL;
+
+       /*
+        * Might a synchronous signal be in the queue?
+        */
+       if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+               return 0;
+
+       /*
+        * Return the first synchronous signal in the queue.
+        */
+       list_for_each_entry(q, &pending->list, list) {
+               /* Synchronous signals have a positive si_code */
+               if ((q->info.si_code > SI_USER) &&
+                   (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+                       sync = q;
+                       goto next;
+               }
+       }
+       return 0;
+next:
+       /*
+        * Check if there is another siginfo for the same signal.
+        */
+       list_for_each_entry_continue(q, &pending->list, list) {
+               if (q->info.si_signo == sync->info.si_signo)
+                       goto still_pending;
+       }
+
+       sigdelset(&pending->signal, sync->info.si_signo);
+       recalc_sigpending();
+still_pending:
+       list_del_init(&sync->list);
+       copy_siginfo(info, &sync->info);
+       __sigqueue_free(sync);
+       return info->si_signo;
+}
+
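dequeue_synchronous_signal() above prefers the first queued signal whose si_code marks it as generated by the faulting instruction. A rough userspace model of that ordering decision, assuming a plain array in place of struct sigpending and an invented is_synchronous() test loosely based on SYNCHRONOUS_MASK:

/* Sketch only: models "deliver synchronous (fault) signals first". */
#include <signal.h>
#include <stdio.h>

struct pending { int signo; int si_code; };

/* Treat a positive si_code on one of the classic fault signals as
 * synchronous, loosely mirroring SYNCHRONOUS_MASK. */
static int is_synchronous(const struct pending *p)
{
        return p->si_code > 0 &&
               (p->signo == SIGSEGV || p->signo == SIGBUS ||
                p->signo == SIGILL  || p->signo == SIGFPE ||
                p->signo == SIGTRAP || p->signo == SIGSYS);
}

/* Return the index to deliver next: the first synchronous entry if any,
 * otherwise the head of the queue. */
static int pick_next(const struct pending *q, int n)
{
        for (int i = 0; i < n; i++)
                if (is_synchronous(&q[i]))
                        return i;
        return n ? 0 : -1;
}

int main(void)
{
        struct pending q[] = {
                { SIGTERM, 0 },                 /* asynchronous, queued first */
                { SIGSEGV, 1 /* SEGV_MAPERR */ },
        };
        int idx = pick_next(q, 2);

        printf("deliver signal %d first (index %d)\n", q[idx].signo, idx);
        return 0;
}

Delivering the fault signal first keeps the instruction pointer in the signal frame pointing at the faulting instruction, which is the rationale spelled out further down in this file.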
 /*
  * Tell a process that it has a new active signal..
  *
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
 
        result = TRACE_SIGNAL_DELIVERED;
        /*
-        * Skip useless siginfo allocation for SIGKILL SIGSTOP,
-        * and kernel threads.
+        * Skip useless siginfo allocation for SIGKILL and kernel threads.
         */
-       if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
+       if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
                goto out_set;
 
        /*
@@ -2394,6 +2435,11 @@ relock:
                goto relock;
        }
 
+       /* Has this task already been marked for death? */
+       ksig->info.si_signo = signr = SIGKILL;
+       if (signal_group_exit(signal))
+               goto fatal;
+
        for (;;) {
                struct k_sigaction *ka;
 
@@ -2407,7 +2453,15 @@ relock:
                        goto relock;
                }
 
-               signr = dequeue_signal(current, &current->blocked, &ksig->info);
+               /*
+                * Signals generated by the execution of an instruction
+                * need to be delivered before any other pending signals
+                * so that the instruction pointer in the signal stack
+                * frame points to the faulting instruction.
+                */
+               signr = dequeue_synchronous_signal(&ksig->info);
+               if (!signr)
+                       signr = dequeue_signal(current, &current->blocked, &ksig->info);
 
                if (!signr)
                        break; /* will return 0 */
@@ -2489,6 +2543,7 @@ relock:
                        continue;
                }
 
+       fatal:
                spin_unlock_irq(&sighand->siglock);
 
                /*
index 8b068adb9da1ce23538eeb3bdf3fc7fbd94f2f59..f1a86a0d881ddb877b4a800bd4f31c30c76fab8e 100644 (file)
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
 
 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-       int err;
-
-       mutex_lock(&bpf_event_mutex);
-       err = __bpf_probe_register(btp, prog);
-       mutex_unlock(&bpf_event_mutex);
-       return err;
+       return __bpf_probe_register(btp, prog);
 }
 
 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-       int err;
-
-       mutex_lock(&bpf_event_mutex);
-       err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
-       mutex_unlock(&bpf_event_mutex);
-       return err;
+       return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
 }
 
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
index e335576b941181614abd9eb8c15b0b710c3c0972..9bde07c06362fb9a00196358f333e2408583c78a 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright (C) IBM Corporation, 2010-2012
  * Author:     Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  */
-#define pr_fmt(fmt)    "trace_kprobe: " fmt
+#define pr_fmt(fmt)    "trace_uprobe: " fmt
 
 #include <linux/ctype.h>
 #include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
        if (ret >= 0) {
                if (ret == maxlen)
                        dst[ret - 1] = '\0';
+               else
+                       /*
+                        * Include the terminating null byte. In this case it
+                        * was copied by strncpy_from_user but not accounted
+                        * for in ret.
+                        */
+                       ret++;
                *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
        }
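The hunk above fixes the reported length to include the terminating NUL that strncpy_from_user() copied but did not count. A standalone sketch of the same accounting, with copy_cstr() as a hypothetical helper rather than the kernel function:

/* Sketch only: the returned length covers every byte now valid in dst,
 * including the trailing NUL when the string fit. */
#include <stdio.h>

static int copy_cstr(char *dst, const char *src, int maxlen)
{
        int ret = 0;

        while (ret < maxlen && (dst[ret] = src[ret]) != '\0')
                ret++;

        if (ret == maxlen)
                dst[ret - 1] = '\0';    /* truncated: force termination */
        else
                ret++;                  /* count the NUL we just copied */

        return ret;
}

int main(void)
{
        char buf[8];

        printf("\"hi\"           -> %d bytes stored\n",
               copy_cstr(buf, "hi", sizeof(buf)));
        printf("\"way too long\" -> %d bytes stored (truncated)\n",
               copy_cstr(buf, "way too long", sizeof(buf)));
        return 0;
}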
 
index 6a8ac7626797854899e77afb4cff0505272dca97..e52f8cafe227d8b9a687b9d3cebd17dfacea50f4 100644 (file)
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
                                  int cnt, bool slow)
 {
-       struct rhltable rhlt;
+       struct rhltable *rhlt;
        unsigned int i, ret;
        const char *key;
        int err = 0;
 
-       err = rhltable_init(&rhlt, &test_rht_params_dup);
-       if (WARN_ON(err))
+       rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+       if (WARN_ON(!rhlt))
+               return -EINVAL;
+
+       err = rhltable_init(rhlt, &test_rht_params_dup);
+       if (WARN_ON(err)) {
+               kfree(rhlt);
                return err;
+       }
 
        for (i = 0; i < cnt; i++) {
                rhl_test_objects[i].value.tid = i;
-               key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+               key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
                key += test_rht_params_dup.key_offset;
 
                if (slow) {
-                       err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+                       err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
                                                             &rhl_test_objects[i].list_node.rhead));
                        if (err == -EAGAIN)
                                err = 0;
                } else
-                       err = rhltable_insert(&rhlt,
+                       err = rhltable_insert(rhlt,
                                              &rhl_test_objects[i].list_node,
                                              test_rht_params_dup);
                if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
                        goto skip_print;
        }
 
-       ret = print_ht(&rhlt);
+       ret = print_ht(rhlt);
        WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
 
 skip_print:
-       rhltable_destroy(&rhlt);
+       rhltable_destroy(rhlt);
+       kfree(rhlt);
 
        return 0;
 }
index e8090f099eb805626ffaa1445d3d0f8254f6c9a7..ef0dec20c7d87b283737ad659b244ec2769c1d45 100644 (file)
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 
                ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
 
+               /* free the TID stats immediately */
+               cfg80211_sinfo_release_content(&sinfo);
+
                dev_put(real_netdev);
                if (ret == -ENOENT) {
                        /* Node is not associated anymore! It would be
index 508f4416dfc9154494888adb5b76efa62673eeaa..415d494cbe223375650b80696f1288fccdd54973 100644 (file)
@@ -20,7 +20,6 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
        parent_dev = __dev_get_by_index((struct net *)parent_net,
                                        dev_get_iflink(net_dev));
        /* if we got a NULL parent_dev there is something broken.. */
-       if (WARN(!parent_dev, "Cannot find parent device"))
+       if (!parent_dev) {
+               pr_err("Cannot find parent device\n");
                return false;
+       }
 
        if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
                return false;
index 5db5a0a4c959b796c86315832f64073b5642e46a..b85ca809e509220b56492c34a9071c6c0d051f25 100644 (file)
@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 
        netif_trans_update(soft_iface);
        vid = batadv_get_vid(skb, 0);
+
+       skb_reset_mac_header(skb);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
index 7559d6835ecb7207c81e4f3f466ca5f8532334ee..7a54dc11ac2d3c7f4f81d8e9c34a3ae5a8cff4b4 100644 (file)
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                /* Only some socketops are supported */
                switch (optname) {
                case SO_RCVBUF:
+                       val = min_t(u32, val, sysctl_rmem_max);
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                        sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
                        break;
                case SO_SNDBUF:
+                       val = min_t(u32, val, sysctl_wmem_max);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                        sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
                        break;
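Both branches now clamp the BPF-supplied value to the corresponding sysctl limit before the usual doubling. A self-contained model of that clamp, using invented constants in place of sysctl_rmem_max and SOCK_MIN_RCVBUF:

/* Sketch only: cap a user-supplied buffer size before doubling it. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_RMEM_MAX   212992u         /* stand-in for sysctl_rmem_max */
#define FAKE_MIN_RCVBUF 2304            /* stand-in for SOCK_MIN_RCVBUF */

static int clamp_rcvbuf(uint32_t requested)
{
        uint32_t val = requested < FAKE_RMEM_MAX ? requested : FAKE_RMEM_MAX;
        int doubled = (int)(val * 2);

        return doubled > FAKE_MIN_RCVBUF ? doubled : FAKE_MIN_RCVBUF;
}

int main(void)
{
        /* Without the clamp, doubling a huge request would wrap around. */
        printf("request 4096       -> rcvbuf %d\n", clamp_rcvbuf(4096));
        printf("request 0x80000000 -> rcvbuf %d\n", clamp_rcvbuf(0x80000000u));
        return 0;
}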
index d6d5c20d7044c88c82ef9b61d83abfcce1ad9d92..8c826603bf36313e2ee945e802003bc77355597b 100644 (file)
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
        struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
 
        /* No sk_callback_lock since already detached. */
-       if (psock->parser.enabled)
-               strp_done(&psock->parser.strp);
+       strp_done(&psock->parser.strp);
 
        cancel_work_sync(&psock->work);
 
index 6eb837a47b5c42f8c73c498525fe4d269fc5b97d..baaaeb2b2c4230edee893579d0ae5a755da3b2ee 100644 (file)
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
 static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
                                           u8 pkt, u8 opt, u8 *val, u8 len)
 {
-       if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+       if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
                return 0;
        return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
 }
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
 static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
                                           u8 pkt, u8 opt, u8 *val, u8 len)
 {
-       if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+       if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
                return 0;
        return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
 }
index 71bb15f491c81af1c911d4182d746e89095ae68e..54f5551fb79920ddefb37ee16b945e7e839f4696 100644 (file)
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
        rtnl_unlock();
 }
 
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int ret;
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        wmb();
 
        dev->dsa_ptr = cpu_dp;
+       lockdep_set_class(&dev->addr_list_lock,
+                         &dsa_master_addr_list_lock_key);
 
        ret = dsa_master_ethtool_setup(dev);
        if (ret)
index a3fcc1d016153c03bd2ec34a738e29b3ecdae9b0..a1c9fe155057299206683311c88c55e6f2cc0095 100644 (file)
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
        struct net_device *master = dsa_slave_to_master(dev);
-
-       if (change & IFF_ALLMULTI)
-               dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
-       if (change & IFF_PROMISC)
-               dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+       if (dev->flags & IFF_UP) {
+               if (change & IFF_ALLMULTI)
+                       dev_set_allmulti(master,
+                                        dev->flags & IFF_ALLMULTI ? 1 : -1);
+               if (change & IFF_PROMISC)
+                       dev_set_promiscuity(master,
+                                           dev->flags & IFF_PROMISC ? 1 : -1);
+       }
 }
 
 static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev && !dp->pl)
+       if (!dev->phydev || !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->set_mac_eee)
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev && !dp->pl)
+       if (!dev->phydev || !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->get_mac_eee)
index 20a64fe6254b0c134edfcc2faebd3eac00934e0a..3978f807fa8b7c8514f7727174facdb9812c9c59 100644 (file)
@@ -1455,12 +1455,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;
+       __be16 o_flags = p->o_flags;
+
+       if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
+           !t->collect_md)
+               o_flags |= TUNNEL_KEY;
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
-                        gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+                        gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
index 4416368dbd49fe07d928104a759a58fb9cfedf15..801a9a0c217e8746b7dd2850b979702031fa81a3 100644 (file)
@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm *p = &t->parms;
+       __be16 o_flags = p->o_flags;
+
+       if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
+           !p->collect_md)
+               o_flags |= TUNNEL_KEY;
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
-                        gre_tnl_flags_to_gre_flags(p->o_flags)) ||
+                        gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
index 8b075f0bc35169b4098bda738950d631b62ec415..6d0b1f3e927bd75f07cc36fe7b087723e05b85b2 100644 (file)
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
        struct sock *sk = sk_to_full_sk(skb->sk);
        unsigned int hh_len;
        struct dst_entry *dst;
+       int strict = (ipv6_addr_type(&iph->daddr) &
+                     (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
        struct flowi6 fl6 = {
                .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-                       rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+                       strict ? skb_dst(skb)->dev->ifindex : 0,
                .flowi6_mark = skb->mark,
                .flowi6_uid = sock_net_uid(net, sk),
                .daddr = iph->daddr,
index 8181ee7e1e27051040bd3bbce9d6a228632a5297..ee5403cbe655ee2d51eef2cf4913ef39a36b67b1 100644 (file)
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
        } else {
                ip6_flow_hdr(hdr, 0, flowlabel);
                hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+               memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
        }
 
        hdr->nexthdr = NEXTHDR_ROUTING;
index 1e03305c0549220550d1e940c66fce967062aa89..e8a1dabef803e5583b0fb3bd70d75a48dad46e9c 100644 (file)
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
        }
 
        err = 0;
-       if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+       if (__in6_dev_get(skb->dev) &&
+           !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
                goto out;
 
        if (t->parms.iph.daddr == 0)
index 26f1d435696a628aca844edb2a88f8b793839d91..fed6becc5daf86afa2ad9188bb28e151244bb5a6 100644 (file)
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S     0x40000000
 #define L2TP_SL_SEQ_MASK   0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ              10
-#define L2TP_HDR_SIZE_NOSEQ            6
+#define L2TP_HDR_SIZE_MAX              14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS       0
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
        __skb_pull(skb, sizeof(struct udphdr));
 
        /* Short packet? */
-       if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+       if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
                l2tp_info(tunnel, L2TP_MSG_DATA,
                          "%s: recv short packet (len=%d)\n",
                          tunnel->name, skb->len);
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
                goto error;
        }
 
+       if (tunnel->version == L2TP_HDR_VER_3 &&
+           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto error;
+
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
        l2tp_session_dec_refcount(session);
 
index 9c9afe94d389bb6fd0440e053043cb01fcd85528..b2ce90260c35f6d0cdd70241a28f54cedf580ea3 100644 (file)
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
 }
 #endif
 
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+                                              unsigned char **ptr, unsigned char **optr)
+{
+       int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+       if (opt_len > 0) {
+               int off = *ptr - *optr;
+
+               if (!pskb_may_pull(skb, off + opt_len))
+                       return -1;
+
+               if (skb->data != *optr) {
+                       *optr = skb->data;
+                       *ptr = skb->data + off;
+               }
+       }
+
+       return 0;
+}
+
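The new helper pulls the L2TPv3 cookie and L2-specific bytes into the linear area and, because that pull can move the buffer head, rebases the cached parse cursors from their old offset. A userspace sketch of the rebase idea, with ensure_linear() and struct pkt as invented stand-ins for the skb API:

/* Sketch only: after asking for more contiguous header bytes, the buffer
 * head may have moved, so cached parse pointers must be recomputed from
 * their old offset. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct pkt {
        unsigned char *data;    /* start of the parsable (linear) area */
        size_t linear_len;
};

/* Pretend "make at least need bytes linear"; may reallocate and move data. */
static int ensure_linear(struct pkt *p, size_t need)
{
        if (need <= p->linear_len)
                return 0;
        unsigned char *n = realloc(p->data, need);
        if (!n)
                return -1;
        p->data = n;
        p->linear_len = need;
        return 0;
}

/* ptr/optr are cursors into p->data; keep them valid across a possible move. */
static int ensure_opt(struct pkt *p, size_t opt_len,
                      unsigned char **ptr, unsigned char **optr)
{
        size_t off = (size_t)(*ptr - *optr);

        if (ensure_linear(p, off + opt_len))
                return -1;
        if (p->data != *optr) {         /* head moved: rebase both cursors */
                *optr = p->data;
                *ptr = p->data + off;
        }
        return 0;
}

int main(void)
{
        struct pkt p = { malloc(8), 8 };

        if (!p.data)
                return 1;
        memcpy(p.data, "L2TPv3hd", 8);
        unsigned char *optr = p.data, *ptr = p.data + 4;

        if (ensure_opt(&p, 12, &ptr, &optr) == 0)
                printf("cursor offset still %zu after rebase\n",
                       (size_t)(ptr - optr));
        free(p.data);
        return 0;
}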
 #define l2tp_printk(ptr, type, func, fmt, ...)                         \
 do {                                                                   \
        if (((ptr)->debug) & (type))                                    \
index 35f6f86d4dcce898f0deacbeba695cfe438cdd91..d4c60523c549d33c6ea29641a37a63040b70530d 100644 (file)
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
 
+       if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto discard_sess;
+
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_dec_refcount(session);
 
index 237f1a4a0b0c8b7fdc76cd67e7c682dee7bdaf79..0ae6899edac0705851df7306c3779033c4a42a37 100644 (file)
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
 
+       if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto discard_sess;
+
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
        l2tp_session_dec_refcount(session);
 
index f170d6c6629a0097f7bf3f874799049fdb39d046..928f13a208b058c199a5e679b7afd53a5e8e84db 100644 (file)
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
                                int head_need, bool may_encrypt)
 {
        struct ieee80211_local *local = sdata->local;
+       struct ieee80211_hdr *hdr;
+       bool enc_tailroom;
        int tail_need = 0;
 
-       if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+       hdr = (struct ieee80211_hdr *) skb->data;
+       enc_tailroom = may_encrypt &&
+                      (sdata->crypto_tx_tailroom_needed_cnt ||
+                       ieee80211_is_mgmt(hdr->frame_control));
+
+       if (enc_tailroom) {
                tail_need = IEEE80211_ENCRYPT_TAILROOM;
                tail_need -= skb_tailroom(skb);
                tail_need = max_t(int, tail_need, 0);
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
 
        if (skb_cloned(skb) &&
            (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
-            !skb_clone_writable(skb, ETH_HLEN) ||
-            (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+            !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
                I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
        else if (head_need || tail_need)
                I802_DEBUG_INC(local->tx_expand_skb_head);
index 741b533148baedd70cd4184ba47bb507b055c142..db4d46332e8681ab7ccbe1e920a14ecd3776f0d1 100644 (file)
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                }
 
                if (nf_ct_key_equal(h, tuple, zone, net)) {
+                       /* Tuple is taken already, so caller will need to find
+                        * a new source port to use.
+                        *
+                        * Only exception:
+                        * If the *original tuples* are identical, then both
+                        * conntracks refer to the same flow.
+                        * This is a rare situation, it can occur e.g. when
+                        * more than one UDP packet is sent from same socket
+                        * in different threads.
+                        *
+                        * Let nf_ct_resolve_clash() deal with this later.
+                        */
+                       if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+                                             &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+                               continue;
+
                        NF_CT_STAT_INC_ATOMIC(net, found);
                        rcu_read_unlock();
                        return 1;
index fb07f6cfc71917b13fcb7be0c4954b25f32b1a33..5a92f23f179fe0242016c7ec1deb6e6a5b4fbf4f 100644 (file)
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
        kfree(trans);
 }
 
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       struct net *net = ctx->net;
+       struct nft_trans *trans;
+
+       if (!nft_set_is_anonymous(set))
+               return;
+
+       list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+               if (trans->msg_type == NFT_MSG_NEWSET &&
+                   nft_trans_set(trans) == set) {
+                       nft_trans_set_bound(trans) = true;
+                       break;
+               }
+       }
+}
+
 static int nf_tables_register_hook(struct net *net,
                                   const struct nft_table *table,
                                   struct nft_chain *chain)
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
        return err;
 }
 
-/* either expr ops provide both activate/deactivate, or neither */
-static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
-{
-       if (!ops)
-               return true;
-
-       if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
-               return false;
-
-       return true;
-}
-
 static void nft_rule_expr_activate(const struct nft_ctx *ctx,
                                   struct nft_rule *rule)
 {
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
-                                    struct nft_rule *rule)
+                                    struct nft_rule *rule,
+                                    enum nft_trans_phase phase)
 {
        struct nft_expr *expr;
 
        expr = nft_expr_first(rule);
        while (expr != nft_expr_last(rule) && expr->ops) {
                if (expr->ops->deactivate)
-                       expr->ops->deactivate(ctx, expr);
+                       expr->ops->deactivate(ctx, expr, phase);
 
                expr = nft_expr_next(expr);
        }
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
                nft_trans_destroy(trans);
                return err;
        }
-       nft_rule_expr_deactivate(ctx, rule);
+       nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
 
        return 0;
 }
@@ -1929,9 +1935,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
  */
 int nft_register_expr(struct nft_expr_type *type)
 {
-       if (!nft_expr_check_ops(type->ops))
-               return -EINVAL;
-
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
                list_add_tail_rcu(&type->list, &nf_tables_expressions);
@@ -2079,10 +2082,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
                        err = PTR_ERR(ops);
                        goto err1;
                }
-               if (!nft_expr_check_ops(ops)) {
-                       err = -EINVAL;
-                       goto err1;
-               }
        } else
                ops = type->ops;
 
@@ -2511,7 +2510,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 static void nf_tables_rule_release(const struct nft_ctx *ctx,
                                   struct nft_rule *rule)
 {
-       nft_rule_expr_deactivate(ctx, rule);
+       nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
        nf_tables_rule_destroy(ctx, rule);
 }
 
@@ -3708,39 +3707,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 bind:
        binding->chain = ctx->chain;
        list_add_tail_rcu(&binding->list, &set->bindings);
+       nft_set_trans_bind(ctx, set);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
-void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding)
-{
-       if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-           nft_is_active(ctx->net, set))
-               list_add_tail_rcu(&set->list, &ctx->table->sets);
-
-       list_add_tail_rcu(&binding->list, &set->bindings);
-}
-EXPORT_SYMBOL_GPL(nf_tables_rebind_set);
-
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-                         struct nft_set_binding *binding)
+                         struct nft_set_binding *binding, bool event)
 {
        list_del_rcu(&binding->list);
 
-       if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-           nft_is_active(ctx->net, set))
+       if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
                list_del_rcu(&set->list);
+               if (event)
+                       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+                                            GFP_KERNEL);
+       }
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-           nft_is_active(ctx->net, set)) {
-               nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
+       if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
                nft_set_destroy(set);
-       }
 }
 EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
 
@@ -6535,6 +6525,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_rule_notify(&trans->ctx,
                                              nft_trans_rule(trans),
                                              NFT_MSG_DELRULE);
+                       nft_rule_expr_deactivate(&trans->ctx,
+                                                nft_trans_rule(trans),
+                                                NFT_TRANS_COMMIT);
                        break;
                case NFT_MSG_NEWSET:
                        nft_clear(net, nft_trans_set(trans));
@@ -6621,7 +6614,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
                nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
                break;
        case NFT_MSG_NEWSET:
-               nft_set_destroy(nft_trans_set(trans));
+               if (!nft_trans_set_bound(trans))
+                       nft_set_destroy(nft_trans_set(trans));
                break;
        case NFT_MSG_NEWSETELEM:
                nft_set_elem_destroy(nft_trans_elem_set(trans),
@@ -6682,7 +6676,9 @@ static int __nf_tables_abort(struct net *net)
                case NFT_MSG_NEWRULE:
                        trans->ctx.chain->use--;
                        list_del_rcu(&nft_trans_rule(trans)->list);
-                       nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+                       nft_rule_expr_deactivate(&trans->ctx,
+                                                nft_trans_rule(trans),
+                                                NFT_TRANS_ABORT);
                        break;
                case NFT_MSG_DELRULE:
                        trans->ctx.chain->use++;
@@ -6692,7 +6688,8 @@ static int __nf_tables_abort(struct net *net)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       list_del_rcu(&nft_trans_set(trans)->list);
+                       if (!nft_trans_set_bound(trans))
+                               list_del_rcu(&nft_trans_set(trans)->list);
                        break;
                case NFT_MSG_DELSET:
                        trans->ctx.table->use++;
index 5eb269428832cd5e6463d764f7e38a986cf5e035..fe64df848365b8a4ba137bbbcd3774887dbdc586 100644 (file)
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net)
        return net_generic(net, nft_compat_net_id);
 }
 
+static void nft_xt_get(struct nft_xt *xt)
+{
+       /* refcount_inc() warns on 0 -> 1 transition, but we can't
+        * init the reference count to 1 in .select_ops -- we can't
+        * undo such an increase when another expression inside the same
+        * rule fails afterwards.
+        */
+       if (xt->listcnt == 0)
+               refcount_set(&xt->refcnt, 1);
+       else
+               refcount_inc(&xt->refcnt);
+
+       xt->listcnt++;
+}
+
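nft_xt_get() has to special-case the first reference because the kernel's refcount_inc() deliberately refuses a 0 -> 1 transition. A simplified userspace model of that split, where the refcount_* helpers are local stand-ins for the kernel API of the same name:

/* Sketch only: start the count with a set on first use, bump it otherwise. */
#include <assert.h>
#include <stdio.h>

struct refcount { int val; };

static void refcount_set(struct refcount *r, int v) { r->val = v; }

/* The real refcount_inc() warns on 0 -> 1 because a zero refcount means the
 * object may already be on its way to being freed. */
static void refcount_inc(struct refcount *r)
{
        assert(r->val != 0);
        r->val++;
}

struct xt_expr {
        struct refcount refcnt;
        int listcnt;            /* how many rules currently reference it */
};

static void xt_get(struct xt_expr *xt)
{
        if (xt->listcnt == 0)
                refcount_set(&xt->refcnt, 1);   /* first user: start at 1 */
        else
                refcount_inc(&xt->refcnt);
        xt->listcnt++;
}

int main(void)
{
        struct xt_expr xt = { { 0 }, 0 };       /* starts unreferenced */

        xt_get(&xt);    /* 0 -> 1 goes through refcount_set() */
        xt_get(&xt);    /* later users bump normally */
        printf("refcnt=%d listcnt=%d\n", xt.refcnt.val, xt.listcnt);
        return 0;
}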
 static bool nft_xt_put(struct nft_xt *xt)
 {
        if (refcount_dec_and_test(&xt->refcnt)) {
@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
 
        nft_xt = container_of(expr->ops, struct nft_xt, ops);
-       refcount_inc(&nft_xt->refcnt);
+       nft_xt_get(nft_xt);
        return 0;
 }
 
@@ -504,7 +519,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return ret;
 
        nft_xt = container_of(expr->ops, struct nft_xt, ops);
-       refcount_inc(&nft_xt->refcnt);
+       nft_xt_get(nft_xt);
        return 0;
 }
 
@@ -558,41 +573,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
        __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
 }
 
-static void nft_compat_activate(const struct nft_ctx *ctx,
-                               const struct nft_expr *expr,
-                               struct list_head *h)
-{
-       struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
-
-       if (xt->listcnt == 0)
-               list_add(&xt->head, h);
-
-       xt->listcnt++;
-}
-
-static void nft_compat_activate_mt(const struct nft_ctx *ctx,
-                                  const struct nft_expr *expr)
-{
-       struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
-
-       nft_compat_activate(ctx, expr, &cn->nft_match_list);
-}
-
-static void nft_compat_activate_tg(const struct nft_ctx *ctx,
-                                  const struct nft_expr *expr)
-{
-       struct nft_compat_net *cn = nft_compat_pernet(ctx->net);
-
-       nft_compat_activate(ctx, expr, &cn->nft_target_list);
-}
-
 static void nft_compat_deactivate(const struct nft_ctx *ctx,
-                                 const struct nft_expr *expr)
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);
 
-       if (--xt->listcnt == 0)
-               list_del_init(&xt->head);
+       if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
+               if (--xt->listcnt == 0)
+                       list_del_init(&xt->head);
+       }
 }
 
 static void
@@ -848,7 +838,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        nft_match->ops.eval = nft_match_eval;
        nft_match->ops.init = nft_match_init;
        nft_match->ops.destroy = nft_match_destroy;
-       nft_match->ops.activate = nft_compat_activate_mt;
        nft_match->ops.deactivate = nft_compat_deactivate;
        nft_match->ops.dump = nft_match_dump;
        nft_match->ops.validate = nft_match_validate;
@@ -866,7 +855,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 
        nft_match->ops.size = matchsize;
 
-       nft_match->listcnt = 1;
+       nft_match->listcnt = 0;
        list_add(&nft_match->head, &cn->nft_match_list);
 
        return &nft_match->ops;
@@ -953,7 +942,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
        nft_target->ops.init = nft_target_init;
        nft_target->ops.destroy = nft_target_destroy;
-       nft_target->ops.activate = nft_compat_activate_tg;
        nft_target->ops.deactivate = nft_compat_deactivate;
        nft_target->ops.dump = nft_target_dump;
        nft_target->ops.validate = nft_target_validate;
@@ -964,7 +952,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        else
                nft_target->ops.eval = nft_target_eval_xt;
 
-       nft_target->listcnt = 1;
+       nft_target->listcnt = 0;
        list_add(&nft_target->head, &cn->nft_target_list);
 
        return &nft_target->ops;
index 07d4efd3d85182997edb4ae0a1fd74d88221a07f..f1172f99752bd1a5d127c1086885b6fc83efc0dc 100644 (file)
@@ -235,20 +235,17 @@ err1:
        return err;
 }
 
-static void nft_dynset_activate(const struct nft_ctx *ctx,
-                               const struct nft_expr *expr)
-{
-       struct nft_dynset *priv = nft_expr_priv(expr);
-
-       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_dynset_deactivate(const struct nft_ctx *ctx,
-                                 const struct nft_expr *expr)
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_dynset *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+       if (phase == NFT_TRANS_PREPARE)
+               return;
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+                            phase == NFT_TRANS_COMMIT);
 }
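The deactivate callbacks now receive an nft_trans_phase so they can tell a rule being prepared for removal apart from a committed or aborted transaction. A reduced model of that phase-aware behaviour, with invented enum and callback names:

/* Sketch only: the same callback does nothing in the prepare phase and only
 * notifies on commit, mirroring the new unbind logic. */
#include <stdio.h>

enum trans_phase { TRANS_PREPARE, TRANS_ABORT, TRANS_COMMIT, TRANS_RELEASE };

struct binding { int bound; };

static void unbind(struct binding *b, int notify)
{
        b->bound = 0;
        printf("unbound%s\n", notify ? " (notify userspace)" : "");
}

static void set_deactivate(struct binding *b, enum trans_phase phase)
{
        if (phase == TRANS_PREPARE)
                return;
        unbind(b, phase == TRANS_COMMIT);
}

int main(void)
{
        struct binding b = { 1 };

        set_deactivate(&b, TRANS_PREPARE);      /* still bound */
        printf("after prepare: bound=%d\n", b.bound);
        set_deactivate(&b, TRANS_COMMIT);       /* now actually unbinds */
        return 0;
}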
 
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
        .eval           = nft_dynset_eval,
        .init           = nft_dynset_init,
        .destroy        = nft_dynset_destroy,
-       .activate       = nft_dynset_activate,
        .deactivate     = nft_dynset_deactivate,
        .dump           = nft_dynset_dump,
 };
index 0777a93211e2b576e57eec2f4aaec71d57f3700d..3f6d1d2a628186c964e79727bbb62fb59370b919 100644 (file)
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
-                                    const struct nft_expr *expr)
+                                    const struct nft_expr *expr,
+                                    enum nft_trans_phase phase)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 
+       if (phase == NFT_TRANS_COMMIT)
+               return;
+
        return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
index 227b2b15a19cbd979df780b3660e2395b689c5db..14496da5141d3f1fa72ec4deb16fba3d8930c62c 100644 (file)
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        return 0;
 }
 
-static void nft_lookup_activate(const struct nft_ctx *ctx,
-                               const struct nft_expr *expr)
-{
-       struct nft_lookup *priv = nft_expr_priv(expr);
-
-       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_lookup_deactivate(const struct nft_ctx *ctx,
-                                 const struct nft_expr *expr)
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_lookup *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+       if (phase == NFT_TRANS_PREPARE)
+               return;
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+                            phase == NFT_TRANS_COMMIT);
 }
 
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
        .eval           = nft_lookup_eval,
        .init           = nft_lookup_init,
-       .activate       = nft_lookup_activate,
        .deactivate     = nft_lookup_deactivate,
        .destroy        = nft_lookup_destroy,
        .dump           = nft_lookup_dump,
index a3185ca2a3a985712f5b2262df3f9e28af6fab4e..ae178e914486174cd0c259497159a3eef8bcbb0c 100644 (file)
@@ -155,20 +155,17 @@ nla_put_failure:
        return -1;
 }
 
-static void nft_objref_map_activate(const struct nft_ctx *ctx,
-                                   const struct nft_expr *expr)
-{
-       struct nft_objref_map *priv = nft_expr_priv(expr);
-
-       nf_tables_rebind_set(ctx, priv->set, &priv->binding);
-}
-
 static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
-                                     const struct nft_expr *expr)
+                                     const struct nft_expr *expr,
+                                     enum nft_trans_phase phase)
 {
        struct nft_objref_map *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+       if (phase == NFT_TRANS_PREPARE)
+               return;
+
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding,
+                            phase == NFT_TRANS_COMMIT);
 }
 
 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
        .eval           = nft_objref_map_eval,
        .init           = nft_objref_map_init,
-       .activate       = nft_objref_map_activate,
        .deactivate     = nft_objref_map_deactivate,
        .destroy        = nft_objref_map_destroy,
        .dump           = nft_objref_map_dump,
index 762d2c6788a385631a312a39625d67a1154ef596..17c9d9f0c8483b4b0a887e69e7caac246c369423 100644 (file)
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
        __rds_create_bind_key(key, addr, port, scope_id);
        rcu_read_lock();
        rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
-       if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
-               rds_sock_addref(rs);
-       else
+       if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+                  !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
                rs = NULL;
+
        rcu_read_unlock();
 
        rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
index eaf19ebaa964e6442cfa4117f862fb4aca9b2345..3f7bb11f3290e57fe27007cb2ce57936f6ae47f4 100644 (file)
@@ -596,6 +596,7 @@ error_requeue_call:
        }
 error_no_call:
        release_sock(&rx->sk);
+error_trace:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
        return ret;
 
@@ -604,7 +605,7 @@ wait_interrupted:
 wait_error:
        finish_wait(sk_sleep(&rx->sk), &wait);
        call = NULL;
-       goto error_no_call;
+       goto error_trace;
 }
 
 /**
index f6aa57fbbbaff96ea8df59c639a1fc575fa2c25b..12ca9d13db83320b6fe3381b1874206dec433a74 100644 (file)
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        if (!tc_skip_hw(fnew->flags)) {
                err = fl_hw_replace_filter(tp, fnew, extack);
                if (err)
-                       goto errout_mask;
+                       goto errout_mask_ht;
        }
 
        if (!tc_in_hw(fnew->flags))
@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        kfree(mask);
        return 0;
 
+errout_mask_ht:
+       rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
+                              fnew->mask->filter_ht_params);
+
 errout_mask:
        fl_mask_put(head, fnew->mask, false);
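The new errout_mask_ht label restores the usual rule that each failure path undoes exactly the steps that already succeeded, in reverse order. A generic, self-contained illustration of that unwind shape (resources A/B/C are invented, not cls_flower objects):

/* Sketch only: goto-based error unwinding in reverse order of setup. */
#include <stdbool.h>
#include <stdio.h>

static bool get_a(void) { puts("get A");         return true; }
static bool get_b(void) { puts("get B");         return true; }
static bool get_c(void) { puts("get C (fails)"); return false; }
static void put_a(void) { puts("put A"); }
static void put_b(void) { puts("put B"); }

static int setup(void)
{
        if (!get_a())
                goto err;
        if (!get_b())
                goto err_a;
        if (!get_c())
                goto err_b;     /* C failed: B and A must both be released */
        return 0;

err_b:
        put_b();
err_a:
        put_a();
err:
        return -1;
}

int main(void)
{
        printf("setup() = %d\n", setup());
        return 0;
}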
 
index f93c3cf9e5674b5d446e90ccd43611eca5fab508..65d6d04546aee0febd6a533264c27701674f6815 100644 (file)
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_transport *transport = NULL;
        struct sctp_sndrcvinfo _sinfo, *sinfo;
-       struct sctp_association *asoc;
+       struct sctp_association *asoc, *tmp;
        struct sctp_cmsgs cmsgs;
        union sctp_addr *daddr;
        bool new = false;
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 
        /* SCTP_SENDALL process */
        if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
-               list_for_each_entry(asoc, &ep->asocs, asocs) {
+               list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
                        err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
                                                        msg_len);
                        if (err == 0)
index 80e0ae5534ecb0e134d609097271967d87f6ba4f..f24633114dfdf215c3abde5ae67e200880edfe94 100644 (file)
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
        }
 }
 
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+{
+       size_t index = 0;
+
+       while (count--) {
+               if (elem == flex_array_get(fa, index))
+                       break;
+               index++;
+       }
+
+       return index;
+}
+
 /* Migrates chunks from stream queues to new stream queues if needed,
  * but not across associations. Also, removes those chunks to streams
  * higher than the new max.
@@ -147,6 +160,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
 
        if (stream->out) {
                fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+               if (stream->out_curr) {
+                       size_t index = fa_index(stream->out, stream->out_curr,
+                                               stream->outcnt);
+
+                       BUG_ON(index == stream->outcnt);
+                       stream->out_curr = flex_array_get(out, index);
+               }
                fa_free(stream->out);
        }
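
stream->out_curr points into the old flex_array, so after the copy it has to be re-derived by index in the replacement array; fa_index() exists only to recover that index. The same idea in a standalone sketch, with plain heap arrays and a hypothetical find_index() helper instead of the flex_array API:

/* Sketch only: re-point a cached element pointer after its array moves. */
#include <stdlib.h>
#include <stdio.h>

struct stream_out { int ssn; };

static size_t find_index(struct stream_out *arr, struct stream_out *elem,
                         size_t count)
{
        size_t i;

        for (i = 0; i < count; i++)
                if (&arr[i] == elem)
                        break;
        return i;       /* == count when not found */
}

int main(void)
{
        size_t oldcnt = 4, newcnt = 8;
        struct stream_out *out = calloc(oldcnt, sizeof(*out));
        struct stream_out *cur;

        if (!out)
                return 1;
        cur = &out[2];                          /* cached "current" element */

        size_t idx = find_index(out, cur, oldcnt);
        struct stream_out *bigger = calloc(newcnt, sizeof(*out));

        if (!bigger) {
                free(out);
                return 1;
        }
        for (size_t i = 0; i < oldcnt; i++)     /* copy, then drop old array */
                bigger[i] = out[i];
        free(out);

        cur = &bigger[idx];     /* re-derive the pointer in the new array */
        printf("current element is index %zu in the new array\n",
               (size_t)(cur - bigger));
        free(bigger);
        return 0;
}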
 
index c4e56602e0c61476f755e157c35da92a8fbcaaa2..b04a813fc865a4299a6d9e19844710bfef1d3836 100644 (file)
@@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
        smc = smc_sk(sk);
        lock_sock(sk);
+       if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+               /* socket was connected before, no more data to read */
+               rc = 0;
+               goto out;
+       }
        if ((sk->sk_state == SMC_INIT) ||
            (sk->sk_state == SMC_LISTEN) ||
            (sk->sk_state == SMC_CLOSED))
@@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
 
        smc = smc_sk(sk);
        lock_sock(sk);
-
+       if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
+               /* socket was connected before, no more data to read */
+               rc = 0;
+               goto out;
+       }
        if (sk->sk_state == SMC_INIT ||
            sk->sk_state == SMC_LISTEN ||
            sk->sk_state == SMC_CLOSED)
index db83332ac1c8ce285f29e2bcc69e22f203b8afc3..a712c9f8699b19f6d61098dc8473ae1d57a2680d 100644 (file)
 
 /********************************** send *************************************/
 
-struct smc_cdc_tx_pend {
-       struct smc_connection   *conn;          /* socket connection */
-       union smc_host_cursor   cursor; /* tx sndbuf cursor sent */
-       union smc_host_cursor   p_cursor;       /* rx RMBE cursor produced */
-       u16                     ctrl_seq;       /* conn. tx sequence # */
-};
-
 /* handler for send/transmission completion of a CDC msg */
 static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
                               struct smc_link *link,
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 
 int smc_cdc_get_free_slot(struct smc_connection *conn,
                          struct smc_wr_buf **wr_buf,
+                         struct smc_rdma_wr **wr_rdma_buf,
                          struct smc_cdc_tx_pend **pend)
 {
        struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
        int rc;
 
        rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+                                    wr_rdma_buf,
                                     (struct smc_wr_tx_pend_priv **)pend);
        if (!conn->alert_token_local)
                /* abnormal termination */
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
                     struct smc_wr_buf *wr_buf,
                     struct smc_cdc_tx_pend *pend)
 {
+       union smc_host_cursor cfed;
        struct smc_link *link;
        int rc;
 
@@ -107,10 +103,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
        smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
                            &conn->local_tx_ctrl, conn);
+       smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
        rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
        if (!rc)
-               smc_curs_copy(&conn->rx_curs_confirmed,
-                             &conn->local_tx_ctrl.cons, conn);
+               smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
 
        return rc;
 }
@@ -121,11 +117,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
+       rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
        if (rc)
                return rc;
 
-       return smc_cdc_msg_send(conn, wr_buf, pend);
+       spin_lock_bh(&conn->send_lock);
+       rc = smc_cdc_msg_send(conn, wr_buf, pend);
+       spin_unlock_bh(&conn->send_lock);
+       return rc;
 }
 
 int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
index b5bfe38c7f9b6a87258adc0aecce58c31c2a164a..271e2524dc8f988706f021cbaf2129b4e3823f02 100644 (file)
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
 #endif
 }
 
-/* calculate cursor difference between old and new, where old <= new */
+/* calculate cursor difference between old and new, where old <= new and
+ * difference cannot exceed size
+ */
 static inline int smc_curs_diff(unsigned int size,
                                union smc_host_cursor *old,
                                union smc_host_cursor *new)
@@ -185,6 +187,28 @@ static inline int smc_curs_comp(unsigned int size,
        return smc_curs_diff(size, old, new);
 }
 
+/* calculate cursor difference between old and new, where old <= new and
+ * difference may exceed size
+ */
+static inline int smc_curs_diff_large(unsigned int size,
+                                     union smc_host_cursor *old,
+                                     union smc_host_cursor *new)
+{
+       if (old->wrap < new->wrap)
+               return min_t(int,
+                            (size - old->count) + new->count +
+                            (new->wrap - old->wrap - 1) * size,
+                            size);
+
+       if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */
+               return min_t(int,
+                            (size - old->count) + new->count +
+                            (new->wrap + 0xffff - old->wrap) * size,
+                            size);
+
+       return max_t(int, 0, (new->count - old->count));
+}
+
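smc_curs_diff_large() is pure cursor arithmetic: the distance may span several full buffer wraps, and the 16-bit wrap counter itself can overflow, so the result is capped at the buffer size. A standalone version of the same calculation, with struct cursor as a simplified stand-in for union smc_host_cursor:

/* Sketch only: capped cursor difference across ring-buffer wraps. */
#include <stdio.h>

struct cursor {
        unsigned short wrap;    /* how many times the ring buffer wrapped */
        unsigned int count;     /* byte offset within the buffer */
};

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Bytes between old and new (old <= new); the true distance may exceed the
 * buffer size, in which case it is capped at size. */
static int curs_diff_large(unsigned int size,
                           const struct cursor *old, const struct cursor *new)
{
        if (old->wrap < new->wrap)
                return min_int((size - old->count) + new->count +
                               (new->wrap - old->wrap - 1) * size, size);

        if (old->wrap > new->wrap)      /* wrap counter overflowed at 0xffff */
                return min_int((size - old->count) + new->count +
                               (new->wrap + 0xffff - old->wrap) * size, size);

        return max_int(0, (int)(new->count - old->count));
}

int main(void)
{
        struct cursor old = { 1, 100 }, new = { 5, 50 };

        /* Several full wraps apart: the answer is capped at the buffer size. */
        printf("diff = %d\n", curs_diff_large(4096, &old, &new));

        new.wrap = 1;
        new.count = 300;        /* same wrap: plain difference */
        printf("diff = %d\n", curs_diff_large(4096, &old, &new));
        return 0;
}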
 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
                                          union smc_host_cursor *local,
                                          struct smc_connection *conn)
@@ -270,10 +294,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                smcr_cdc_msg_to_host(local, peer, conn);
 }
 
-struct smc_cdc_tx_pend;
+struct smc_cdc_tx_pend {
+       struct smc_connection   *conn;          /* socket connection */
+       union smc_host_cursor   cursor;         /* tx sndbuf cursor sent */
+       union smc_host_cursor   p_cursor;       /* rx RMBE cursor produced */
+       u16                     ctrl_seq;       /* conn. tx sequence # */
+};
 
 int smc_cdc_get_free_slot(struct smc_connection *conn,
                          struct smc_wr_buf **wr_buf,
+                         struct smc_rdma_wr **wr_rdma_buf,
                          struct smc_cdc_tx_pend **pend);
 void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
 int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
index 776e9dfc915dd5fb9200a5440862b7bbbc4f5440..d53fd588d1f5a4450819934c83cf960dc29ff46b 100644 (file)
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
        vec.iov_len = sizeof(struct smc_clc_msg_decline);
        len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
                             sizeof(struct smc_clc_msg_decline));
-       if (len < sizeof(struct smc_clc_msg_decline))
+       if (len < 0 || len < sizeof(struct smc_clc_msg_decline))
                len = -EPROTO;
        return len > 0 ? 0 : len;
 }
index ea2b87f29469610c1afaf5e043a935c24589425b..e39cadda1bf5a4c48b9c31459b2046a5c921a629 100644 (file)
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work)
 
        switch (sk->sk_state) {
        case SMC_INIT:
-               if (atomic_read(&conn->bytes_to_rcv) ||
-                   (rxflags->peer_done_writing &&
-                    !smc_cdc_rxed_any_close(conn))) {
-                       sk->sk_state = SMC_APPCLOSEWAIT1;
-               } else {
-                       sk->sk_state = SMC_CLOSED;
-                       sock_put(sk); /* passive closing */
-               }
+               sk->sk_state = SMC_APPCLOSEWAIT1;
                break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_APPCLOSEWAIT1;
index 35c1cdc93e1c54c3ad0003aea46f0b7597a7e872..aa1c551cee8160ac732399091bdf26c3febaee46 100644 (file)
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
 {
        struct smc_link_group *lgr = conn->lgr;
 
+       if (!lgr)
+               return;
        write_lock_bh(&lgr->conns_lock);
        if (conn->alert_token_local) {
                __smc_lgr_unregister_conn(conn);
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
                conn->sndbuf_desc->used = 0;
        if (conn->rmb_desc) {
                if (!conn->rmb_desc->regerr) {
-                       conn->rmb_desc->used = 0;
                        if (!lgr->is_smcd) {
                                /* unregister rmb with peer */
                                smc_llc_do_delete_rkey(
                                                &lgr->lnk[SMC_SINGLE_LINK],
                                                conn->rmb_desc);
                        }
+                       conn->rmb_desc->used = 0;
                } else {
                        /* buf registration failed, reuse not possible */
                        write_lock_bh(&lgr->rmbs_lock);
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
                        local_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
                        smc_lgr_register_conn(conn); /* add smc conn to lgr */
+                       if (delayed_work_pending(&lgr->free_work))
+                               cancel_delayed_work(&lgr->free_work);
                        write_unlock_bh(&lgr->conns_lock);
                        break;
                }
index b00287989a3dea43975ce728df8a9e2885656b71..8806d2afa6edb1178f4ad2904aa1346ee42bcae6 100644 (file)
@@ -52,6 +52,24 @@ enum smc_wr_reg_state {
        FAILED          /* ib_wr_reg_mr response: failure */
 };
 
+struct smc_rdma_sge {                          /* sges for RDMA writes */
+       struct ib_sge           wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
+};
+
+#define SMC_MAX_RDMA_WRITES    2               /* max. # of RDMA writes per
+                                                * message send
+                                                */
+
+struct smc_rdma_sges {                         /* sges per message send */
+       struct smc_rdma_sge     tx_rdma_sge[SMC_MAX_RDMA_WRITES];
+};
+
+struct smc_rdma_wr {                           /* work requests per message
+                                                * send
+                                                */
+       struct ib_rdma_wr       wr_tx_rdma[SMC_MAX_RDMA_WRITES];
+};
+
 struct smc_link {
        struct smc_ib_device    *smcibdev;      /* ib-device */
        u8                      ibport;         /* port - values 1 | 2 */
@@ -64,6 +82,8 @@ struct smc_link {
        struct smc_wr_buf       *wr_tx_bufs;    /* WR send payload buffers */
        struct ib_send_wr       *wr_tx_ibs;     /* WR send meta data */
        struct ib_sge           *wr_tx_sges;    /* WR send gather meta data */
+       struct smc_rdma_sges    *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/
+       struct smc_rdma_wr      *wr_tx_rdmas;   /* WR RDMA WRITE */
        struct smc_wr_tx_pend   *wr_tx_pends;   /* WR send waiting for CQE */
        /* above four vectors have wr_tx_cnt elements and use the same index */
        dma_addr_t              wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
index e519ef29c0ffcb74baf25753a58048504fedd813..76487a16934ed015a05d9787c682b3df4e253565 100644 (file)
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
 
 static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 {
-       struct smc_ib_device *smcibdev =
-               (struct smc_ib_device *)ibevent->device;
+       struct smc_link *lnk = (struct smc_link *)priv;
+       struct smc_ib_device *smcibdev = lnk->smcibdev;
        u8 port_idx;
 
        switch (ibevent->event) {
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
        case IB_EVENT_GID_CHANGE:
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
-               port_idx = ibevent->element.port_num - 1;
+               port_idx = ibevent->element.qp->port - 1;
                set_bit(port_idx, &smcibdev->port_event_mask);
                schedule_work(&smcibdev->port_event_work);
                break;
index a6d3623d06f422073995b5c4e42d31b46088b466..4fd60c522802949e8b0f6931f911fc445765873b 100644 (file)
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link,
 {
        int rc;
 
-       rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend);
+       rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
+                                    pend);
        if (rc < 0)
                return rc;
        BUILD_BUG_ON_MSG(
index 7cb3e4f07c10f5da154c0322ef4fae54a6dc85e2..632c3109dee52c20024aeeb47437eb4874c8da87 100644 (file)
@@ -27,7 +27,7 @@
 static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
        [SMC_PNETID_NAME] = {
                .type = NLA_NUL_STRING,
-               .len = SMC_MAX_PNETID_LEN - 1
+               .len = SMC_MAX_PNETID_LEN
        },
        [SMC_PNETID_ETHNAME] = {
                .type = NLA_NUL_STRING,
index d8366ed517576ba7f258ad74f0cf91f7fd92d7e0..f93f3580c1006e0638a0c127a631da5cdc7f6b5f 100644 (file)
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
                        conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;
 
                if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
+                       if (send_done)
+                               return send_done;
                        rc = smc_tx_wait(smc, msg->msg_flags);
-                       if (rc) {
-                               if (send_done)
-                                       return send_done;
+                       if (rc)
                                goto out_err;
-                       }
                        continue;
                }
 
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
 
 /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
 static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
-                            int num_sges, struct ib_sge sges[])
+                            int num_sges, struct ib_rdma_wr *rdma_wr)
 {
        struct smc_link_group *lgr = conn->lgr;
-       struct ib_rdma_wr rdma_wr;
        struct smc_link *link;
        int rc;
 
-       memset(&rdma_wr, 0, sizeof(rdma_wr));
        link = &lgr->lnk[SMC_SINGLE_LINK];
-       rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
-       rdma_wr.wr.sg_list = sges;
-       rdma_wr.wr.num_sge = num_sges;
-       rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
-       rdma_wr.remote_addr =
+       rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
+       rdma_wr->wr.num_sge = num_sges;
+       rdma_wr->remote_addr =
                lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
                /* RMBE within RMB */
                conn->tx_off +
                /* offset within RMBE */
                peer_rmbe_offset;
-       rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
-       rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL);
+       rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
+       rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
        if (rc) {
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
                smc_lgr_terminate(lgr);
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn,
 /* SMC-R helper for smc_tx_rdma_writes() */
 static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
                               size_t src_off, size_t src_len,
-                              size_t dst_off, size_t dst_len)
+                              size_t dst_off, size_t dst_len,
+                              struct smc_rdma_wr *wr_rdma_buf)
 {
        dma_addr_t dma_addr =
                sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
-       struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
        int src_len_sum = src_len, dst_len_sum = dst_len;
-       struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
        int sent_count = src_off;
        int srcchunk, dstchunk;
        int num_sges;
        int rc;
 
        for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+               struct ib_sge *sge =
+                       wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;
+
                num_sges = 0;
                for (srcchunk = 0; srcchunk < 2; srcchunk++) {
-                       sges[srcchunk].addr = dma_addr + src_off;
-                       sges[srcchunk].length = src_len;
-                       sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+                       sge[srcchunk].addr = dma_addr + src_off;
+                       sge[srcchunk].length = src_len;
                        num_sges++;
 
                        src_off += src_len;
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
                        src_len = dst_len - src_len; /* remainder */
                        src_len_sum += src_len;
                }
-               rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+               rc = smc_tx_rdma_write(conn, dst_off, num_sges,
+                                      &wr_rdma_buf->wr_tx_rdma[dstchunk]);
                if (rc)
                        return rc;
                if (dst_len_sum == len)
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
 /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
  * usable snd_wnd as max transmit
  */
-static int smc_tx_rdma_writes(struct smc_connection *conn)
+static int smc_tx_rdma_writes(struct smc_connection *conn,
+                             struct smc_rdma_wr *wr_rdma_buf)
 {
        size_t len, src_len, dst_off, dst_len; /* current chunk values */
        union smc_host_cursor sent, prep, prod, cons;
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
                                         dst_off, dst_len);
        else
                rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
-                                        dst_off, dst_len);
+                                        dst_off, dst_len, wr_rdma_buf);
        if (rc)
                return rc;
 
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn)
 static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
 {
        struct smc_cdc_producer_flags *pflags;
+       struct smc_rdma_wr *wr_rdma_buf;
        struct smc_cdc_tx_pend *pend;
        struct smc_wr_buf *wr_buf;
        int rc;
 
-       spin_lock_bh(&conn->send_lock);
-       rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
+       rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
        if (rc < 0) {
                if (rc == -EBUSY) {
                        struct smc_sock *smc =
                                container_of(conn, struct smc_sock, conn);
 
-                       if (smc->sk.sk_err == ECONNABORTED) {
-                               rc = sock_error(&smc->sk);
-                               goto out_unlock;
-                       }
+                       if (smc->sk.sk_err == ECONNABORTED)
+                               return sock_error(&smc->sk);
                        rc = 0;
                        if (conn->alert_token_local) /* connection healthy */
                                mod_delayed_work(system_wq, &conn->tx_work,
                                                 SMC_TX_WORK_DELAY);
                }
-               goto out_unlock;
+               return rc;
        }
 
+       spin_lock_bh(&conn->send_lock);
        if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
-               rc = smc_tx_rdma_writes(conn);
+               rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
                if (rc) {
                        smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
                                           (struct smc_wr_tx_pend_priv *)pend);
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
 
        spin_lock_bh(&conn->send_lock);
        if (!pflags->urg_data_present)
-               rc = smc_tx_rdma_writes(conn);
+               rc = smc_tx_rdma_writes(conn, NULL);
        if (!rc)
                rc = smcd_cdc_msg_send(conn);
 
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
        if (to_confirm > conn->rmbe_update_limit) {
                smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
                sender_free = conn->rmb_desc->len -
-                             smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+                             smc_curs_diff_large(conn->rmb_desc->len,
+                                                 &cfed, &prod);
        }
 
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
index c2694750a6a8abe2f1353dae02e95420e5a01b48..253aa75dc2b6818c1a9d4646535c51e1e59bc3f3 100644 (file)
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
  * @link:              Pointer to smc_link used to later send the message.
  * @handler:           Send completion handler function pointer.
  * @wr_buf:            Out value returns pointer to message buffer.
+ * @wr_rdma_buf:       Out value returns pointer to rdma work request.
  * @wr_pend_priv:      Out value returns pointer serving as handler context.
  *
  * Return: 0 on success, or -errno on error.
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 int smc_wr_tx_get_free_slot(struct smc_link *link,
                            smc_wr_tx_handler handler,
                            struct smc_wr_buf **wr_buf,
+                           struct smc_rdma_wr **wr_rdma_buf,
                            struct smc_wr_tx_pend_priv **wr_pend_priv)
 {
        struct smc_wr_tx_pend *wr_pend;
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
        wr_ib = &link->wr_tx_ibs[idx];
        wr_ib->wr_id = wr_id;
        *wr_buf = &link->wr_tx_bufs[idx];
+       if (wr_rdma_buf)
+               *wr_rdma_buf = &link->wr_tx_rdmas[idx];
        *wr_pend_priv = &wr_pend->priv;
        return 0;
 }
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link,
                u32 idx = pend->idx;
 
                /* clear the full struct smc_wr_tx_pend including .priv */
-               memset(&link->wr_tx_pends[pend->idx], 0,
-                      sizeof(link->wr_tx_pends[pend->idx]));
-               memset(&link->wr_tx_bufs[pend->idx], 0,
-                      sizeof(link->wr_tx_bufs[pend->idx]));
+               memset(&link->wr_tx_pends[idx], 0,
+                      sizeof(link->wr_tx_pends[idx]));
+               memset(&link->wr_tx_bufs[idx], 0,
+                      sizeof(link->wr_tx_bufs[idx]));
                test_and_clear_bit(idx, link->wr_tx_mask);
                return 1;
        }
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk)
                        lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
                lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
                lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
+               lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
+                       lnk->roce_pd->local_dma_lkey;
+               lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
+                       lnk->roce_pd->local_dma_lkey;
+               lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
+                       lnk->roce_pd->local_dma_lkey;
+               lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
+                       lnk->roce_pd->local_dma_lkey;
                lnk->wr_tx_ibs[i].next = NULL;
                lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
                lnk->wr_tx_ibs[i].num_sge = 1;
                lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
                lnk->wr_tx_ibs[i].send_flags =
                        IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+               lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
+               lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
+               lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
+                       lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
+               lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
+                       lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
        }
        for (i = 0; i < lnk->wr_rx_cnt; i++) {
                lnk->wr_rx_sges[i].addr =
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
        lnk->wr_tx_mask = NULL;
        kfree(lnk->wr_tx_sges);
        lnk->wr_tx_sges = NULL;
+       kfree(lnk->wr_tx_rdma_sges);
+       lnk->wr_tx_rdma_sges = NULL;
        kfree(lnk->wr_rx_sges);
        lnk->wr_rx_sges = NULL;
+       kfree(lnk->wr_tx_rdmas);
+       lnk->wr_tx_rdmas = NULL;
        kfree(lnk->wr_rx_ibs);
        lnk->wr_rx_ibs = NULL;
        kfree(lnk->wr_tx_ibs);
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
                                  GFP_KERNEL);
        if (!link->wr_rx_ibs)
                goto no_mem_wr_tx_ibs;
+       link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
+                                   sizeof(link->wr_tx_rdmas[0]),
+                                   GFP_KERNEL);
+       if (!link->wr_tx_rdmas)
+               goto no_mem_wr_rx_ibs;
+       link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
+                                       sizeof(link->wr_tx_rdma_sges[0]),
+                                       GFP_KERNEL);
+       if (!link->wr_tx_rdma_sges)
+               goto no_mem_wr_tx_rdmas;
        link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
                                   GFP_KERNEL);
        if (!link->wr_tx_sges)
-               goto no_mem_wr_rx_ibs;
+               goto no_mem_wr_tx_rdma_sges;
        link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
                                   sizeof(link->wr_rx_sges[0]),
                                   GFP_KERNEL);
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges:
        kfree(link->wr_rx_sges);
 no_mem_wr_tx_sges:
        kfree(link->wr_tx_sges);
+no_mem_wr_tx_rdma_sges:
+       kfree(link->wr_tx_rdma_sges);
+no_mem_wr_tx_rdmas:
+       kfree(link->wr_tx_rdmas);
 no_mem_wr_rx_ibs:
        kfree(link->wr_rx_ibs);
 no_mem_wr_tx_ibs:
index 1d85bb14fd6f88c455274ef13c5003fb4eb46e28..09bf32fd39596ea241c97f3cd2dcbb8f146d9604 100644 (file)
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev);
 
 int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
                            struct smc_wr_buf **wr_buf,
+                           struct smc_rdma_wr **wrs,
                            struct smc_wr_tx_pend_priv **wr_pend_priv);
 int smc_wr_tx_put_slot(struct smc_link *link,
                       struct smc_wr_tx_pend_priv *wr_pend_priv);
index e89884e2197babb827b34a345ad8d8625c7071ce..d80d87a395ea99c58624feb204c896e697140bfa 100644 (file)
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
 EXPORT_SYMBOL(dlci_ioctl_set);
 
 static long sock_do_ioctl(struct net *net, struct socket *sock,
-                         unsigned int cmd, unsigned long arg,
-                         unsigned int ifreq_size)
+                         unsigned int cmd, unsigned long arg)
 {
        int err;
        void __user *argp = (void __user *)arg;
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
        } else {
                struct ifreq ifr;
                bool need_copyout;
-               if (copy_from_user(&ifr, argp, ifreq_size))
+               if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
                        return -EFAULT;
                err = dev_ioctl(net, cmd, &ifr, &need_copyout);
                if (!err && need_copyout)
-                       if (copy_to_user(argp, &ifr, ifreq_size))
+                       if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
                                return -EFAULT;
        }
        return err;
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                        err = open_related_ns(&net->ns, get_net_ns);
                        break;
                default:
-                       err = sock_do_ioctl(net, sock, cmd, arg,
-                                           sizeof(struct ifreq));
+                       err = sock_do_ioctl(net, sock, cmd, arg);
                        break;
                }
        return err;
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
-                           sizeof(struct compat_ifreq));
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
        set_fs(old_fs);
        if (!err)
                err = compat_put_timeval(&ktv, up);
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
-                           sizeof(struct compat_ifreq));
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
        set_fs(old_fs);
        if (!err)
                err = compat_put_timespec(&kts, up);
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
        return dev_ioctl(net, cmd, &ifreq, NULL);
 }
 
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
+                             unsigned int cmd,
+                             struct compat_ifreq __user *uifr32)
+{
+       struct ifreq __user *uifr;
+       int err;
+
+       /* Handle the fact that while struct ifreq has the same *layout* on
+        * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
+        * which are handled elsewhere, it still has different *size* due to
+        * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
+        * resulting in struct ifreq being 32 and 40 bytes respectively).
+        * As a result, if the struct happens to be at the end of a page and
+        * the next page isn't readable/writable, we get a fault. To prevent
+        * that, copy back and forth to the full size.
+        */
+
+       uifr = compat_alloc_user_space(sizeof(*uifr));
+       if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
+               return -EFAULT;
+
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
+
+       if (!err) {
+               switch (cmd) {
+               case SIOCGIFFLAGS:
+               case SIOCGIFMETRIC:
+               case SIOCGIFMTU:
+               case SIOCGIFMEM:
+               case SIOCGIFHWADDR:
+               case SIOCGIFINDEX:
+               case SIOCGIFADDR:
+               case SIOCGIFBRDADDR:
+               case SIOCGIFDSTADDR:
+               case SIOCGIFNETMASK:
+               case SIOCGIFPFLAGS:
+               case SIOCGIFTXQLEN:
+               case SIOCGMIIPHY:
+               case SIOCGMIIREG:
+               case SIOCGIFNAME:
+                       if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
+                               err = -EFAULT;
+                       break;
+               }
+       }
+       return err;
+}
+
 static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
                        struct compat_ifreq __user *uifr32)
 {
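
The comment inside compat_ifreq_ioctl() above comes down to simple size arithmetic. The following self-contained sketch shows it; struct ifmap_model and struct ifreq_model are simplified stand-ins, not the uapi definitions.

/* Illustrative sketch only: on an ILP32 build the map packs into 16
 * bytes and the request into 32; on an LP64 build the two unsigned
 * longs grow them to 24 and 40 bytes respectively.
 */
#include <stdio.h>

#define MODEL_IFNAMSIZ 16

struct ifmap_model {
        unsigned long mem_start;        /* 4 bytes on 32-bit, 8 on 64-bit */
        unsigned long mem_end;
        unsigned short base_addr;
        unsigned char irq, dma, port;   /* tail padded to long alignment  */
};

struct ifreq_model {
        char ifr_name[MODEL_IFNAMSIZ];
        union {
                struct ifmap_model ifru_map;    /* the largest member */
                void *ifru_data;
        } ifr_ifru;
};

int main(void)
{
        /* prints "ifmap=24 ifreq=40" on LP64, "ifmap=16 ifreq=32" with -m32 */
        printf("ifmap=%zu ifreq=%zu\n",
               sizeof(struct ifmap_model), sizeof(struct ifreq_model));
        return 0;
}

Copying through a full-sized kernel-space struct ifreq, as the function above does, avoids reading or writing the 8 bytes that only exist in the 64-bit layout.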
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
        }
 
        set_fs(KERNEL_DS);
-       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
-                           sizeof(struct compat_ifreq));
+       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
        set_fs(old_fs);
 
 out:
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
        case SIOCSIFTXQLEN:
        case SIOCBRADDIF:
        case SIOCBRDELIF:
+       case SIOCGIFNAME:
        case SIOCSIFNAME:
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
-       case SIOCSARP:
-       case SIOCGARP:
-       case SIOCDARP:
-       case SIOCATMARK:
        case SIOCBONDENSLAVE:
        case SIOCBONDRELEASE:
        case SIOCBONDSETHWADDR:
        case SIOCBONDCHANGEACTIVE:
-       case SIOCGIFNAME:
-               return sock_do_ioctl(net, sock, cmd, arg,
-                                    sizeof(struct compat_ifreq));
+               return compat_ifreq_ioctl(net, sock, cmd, argp);
+
+       case SIOCSARP:
+       case SIOCGARP:
+       case SIOCDARP:
+       case SIOCATMARK:
+               return sock_do_ioctl(net, sock, cmd, arg);
        }
 
        return -ENOIOCTLCMD;
index cf51b8f9b15f5d2806366b116c1226fff1b2fd8b..1f200119268ccdff5674f49eadebbde0de86fdd1 100644 (file)
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
                                      DMA_TO_DEVICE);
 }
 
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+                                   struct xdr_buf *xdr,
+                                   __be32 *wr_lst)
+{
+       int elements;
+
+       /* xdr->head */
+       elements = 1;
+
+       /* xdr->pages */
+       if (!wr_lst) {
+               unsigned int remaining;
+               unsigned long pageoff;
+
+               pageoff = xdr->page_base & ~PAGE_MASK;
+               remaining = xdr->page_len;
+               while (remaining) {
+                       ++elements;
+                       remaining -= min_t(u32, PAGE_SIZE - pageoff,
+                                          remaining);
+                       pageoff = 0;
+               }
+       }
+
+       /* xdr->tail */
+       if (xdr->tail[0].iov_len)
+               ++elements;
+
+       /* assume 1 SGE is needed for the transport header */
+       return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+                                     struct svc_rdma_send_ctxt *ctxt,
+                                     struct xdr_buf *xdr, __be32 *wr_lst)
+{
+       unsigned char *dst, *tailbase;
+       unsigned int taillen;
+
+       dst = ctxt->sc_xprt_buf;
+       dst += ctxt->sc_sges[0].length;
+
+       memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+       dst += xdr->head[0].iov_len;
+
+       tailbase = xdr->tail[0].iov_base;
+       taillen = xdr->tail[0].iov_len;
+       if (wr_lst) {
+               u32 xdrpad;
+
+               xdrpad = xdr_padsize(xdr->page_len);
+               if (taillen && xdrpad) {
+                       tailbase += xdrpad;
+                       taillen -= xdrpad;
+               }
+       } else {
+               unsigned int len, remaining;
+               unsigned long pageoff;
+               struct page **ppages;
+
+               ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+               pageoff = xdr->page_base & ~PAGE_MASK;
+               remaining = xdr->page_len;
+               while (remaining) {
+                       len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+                       memcpy(dst, page_address(*ppages), len);
+                       remaining -= len;
+                       dst += len;
+                       pageoff = 0;
+               }
+       }
+
+       if (taillen)
+               memcpy(dst, tailbase, taillen);
+
+       ctxt->sc_sges[0].length += xdr->len;
+       ib_dma_sync_single_for_device(rdma->sc_pd->device,
+                                     ctxt->sc_sges[0].addr,
+                                     ctxt->sc_sges[0].length,
+                                     DMA_TO_DEVICE);
+
+       return 0;
+}
+
 /* svc_rdma_map_reply_msg - Map the buffer holding RPC message
  * @rdma: controlling transport
  * @ctxt: send_ctxt for the Send WR
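
The decision in svc_rdma_pull_up_needed() above is pure SGE arithmetic: the head iovec, one SGE per touched page, an optional tail, plus one for the transport header, compared against the device's Send SGE limit. Below is a standalone sketch of that count for the no-Write-list case; the page size and inputs are made-up examples, not kernel defaults.

/* Standalone sketch (not the kernel code) of the SGE-counting
 * arithmetic for a reply without a Write list.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static unsigned int sges_needed(unsigned int page_base, unsigned int page_len,
                                unsigned int tail_len)
{
        unsigned int elements = 1;              /* xdr->head             */
        unsigned int pageoff = page_base % EXAMPLE_PAGE_SIZE;
        unsigned int remaining = page_len;

        while (remaining) {                     /* one SGE per touched page */
                unsigned int chunk = EXAMPLE_PAGE_SIZE - pageoff;

                if (chunk > remaining)
                        chunk = remaining;
                remaining -= chunk;
                elements++;
                pageoff = 0;
        }
        if (tail_len)
                elements++;                     /* xdr->tail             */
        return elements + 1;                    /* + transport header    */
}

int main(void)
{
        /* a 20 KiB payload starting 100 bytes into a page, 4-byte tail:
         * 1 (head) + 6 (pages) + 1 (tail) + 1 (hdr) = 9 SGEs needed
         */
        printf("%u\n", sges_needed(100, 20480, 4));
        return 0;
}

When the count reaches the device limit, the reply is copied into the transport-header buffer instead, which is what svc_rdma_pull_up_reply_msg() above implements.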
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        u32 xdr_pad;
        int ret;
 
-       if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-               return -EIO;
+       if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+               return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+       ++ctxt->sc_cur_sge_no;
        ret = svc_rdma_dma_map_buf(rdma, ctxt,
                                   xdr->head[0].iov_base,
                                   xdr->head[0].iov_len);
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        while (remaining) {
                len = min_t(u32, PAGE_SIZE - page_off, remaining);
 
-               if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-                       return -EIO;
+               ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
                                            page_off, len);
                if (ret < 0)
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
        len = xdr->tail[0].iov_len;
 tail:
        if (len) {
-               if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
-                       return -EIO;
+               ++ctxt->sc_cur_sge_no;
                ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
                if (ret < 0)
                        return ret;
index 924c17d46903c9ead94c6a2c33d926ef53469392..57f86c63a46369537c3362dae062da82b554c271 100644 (file)
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /* Transport header, head iovec, tail iovec */
        newxprt->sc_max_send_sges = 3;
        /* Add one SGE per page list entry */
-       newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
-       if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
-               pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
-                      newxprt->sc_max_send_sges);
-               goto errout;
-       }
+       newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+       if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+               newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
        newxprt->sc_max_req_size = svcrdma_max_req_size;
        newxprt->sc_max_requests = svcrdma_max_requests;
        newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
index 5d3cce9e8744d5207753107aeb55518f2848f50a..15eb5d3d475094778cd21cfeeec096cb20a89715 100644 (file)
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
 {
        struct virtio_vsock *vsock = virtio_vsock_get();
 
+       if (!vsock)
+               return VMADDR_CID_ANY;
+
        return vsock->guest_cid;
 }
 
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
 
        virtio_vsock_update_guest_cid(vsock);
 
-       ret = vsock_core_init(&virtio_transport.transport);
-       if (ret < 0)
-               goto out_vqs;
-
        vsock->rx_buf_nr = 0;
        vsock->rx_buf_max_nr = 0;
        atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
        mutex_unlock(&the_virtio_vsock_mutex);
        return 0;
 
-out_vqs:
-       vsock->vdev->config->del_vqs(vsock->vdev);
 out:
        kfree(vsock);
        mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
        flush_work(&vsock->event_work);
        flush_work(&vsock->send_pkt_work);
 
+       /* Reset all connected sockets when the device disappears */
+       vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
        vdev->config->reset(vdev);
 
        mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
 
        mutex_lock(&the_virtio_vsock_mutex);
        the_virtio_vsock = NULL;
-       vsock_core_exit();
        mutex_unlock(&the_virtio_vsock_mutex);
 
        vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
        virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
        if (!virtio_vsock_workqueue)
                return -ENOMEM;
+
        ret = register_virtio_driver(&virtio_vsock_driver);
        if (ret)
-               destroy_workqueue(virtio_vsock_workqueue);
+               goto out_wq;
+
+       ret = vsock_core_init(&virtio_transport.transport);
+       if (ret)
+               goto out_vdr;
+
+       return 0;
+
+out_vdr:
+       unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+       destroy_workqueue(virtio_vsock_workqueue);
        return ret;
+
 }
 
 static void __exit virtio_vsock_exit(void)
 {
+       vsock_core_exit();
        unregister_virtio_driver(&virtio_vsock_driver);
        destroy_workqueue(virtio_vsock_workqueue);
 }
index 882d97bdc6bfd49ac3acf80f5674821b15f46e35..550ac9d827fe7d7f266f0ac511afec0aa4d8f8c3 100644 (file)
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
                cfg80211_sched_dfs_chan_update(rdev);
        }
 
+       schedule_work(&cfg80211_disconnect_work);
+
        return err;
 }
 
index c5d6f341860136a002cf3c5b18c82889c5356afb..f6b40563dc633af1074848f256335902997350aa 100644 (file)
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
                                u32 center_freq_khz, u32 bw_khz);
 
+extern struct work_struct cfg80211_disconnect_work;
+
 /**
  * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
  * @wiphy: the wiphy to validate against
index f741d8376a463b588231550c4cca661e55d2e6b7..7d34cb884840e96d68b43e5f90036d3d3cd306dd 100644 (file)
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work)
        rtnl_unlock();
 }
 
-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
+DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
 
 
 /*
index 33e67bd1dc343cece0573d8dfee9d6e1a3319e9f..32234481ad7dbee128cc9c31531b1843baf672b1 100644 (file)
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
 
        me->verbose = verbose;
 
-       me->fd = open("/dev/mei", O_RDWR);
+       me->fd = open("/dev/mei0", O_RDWR);
        if (me->fd == -1) {
                mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
                goto err;
index 9174f1b3a987f362f783ca545a0f554003240eca..1ec706ced75ca4d0d45c1774bc799c534da71d23 100644 (file)
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev)
        err = snd_hda_codec_build_controls(codec);
        if (err < 0)
                goto error_module;
-       if (codec->card->registered) {
+       /* only register after the bus probe finished; otherwise it's racy */
+       if (!codec->bus->bus_probing && codec->card->registered) {
                err = snd_card_register(codec->card);
                if (err < 0)
                        goto error_module;
index e784130ea4e0eb2d7fec5357fb86872fb44292fc..e5c49003e75fdd81a62fbea142089500d0eb6eae 100644 (file)
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip)
        int dev = chip->dev_index;
        int err;
 
+       to_hda_bus(bus)->bus_probing = 1;
        hda->probe_continued = 1;
 
        /* bind with i915 if needed */
@@ -2269,6 +2270,7 @@ out_free:
        if (err < 0)
                hda->init_failed = 1;
        complete_all(&hda->probe_wait);
+       to_hda_bus(bus)->bus_probing = 0;
        return err;
 }
 
index e5bdbc2456822cb835c9644c7bc69468c91a92c7..29882bda763289374069ec4777e62b781b416673 100644 (file)
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec)
        ca0132_exit_chip(codec);
 
        snd_hda_power_down(codec);
-       if (IS_ENABLED(CONFIG_PCI) && spec->mem_base)
+#ifdef CONFIG_PCI
+       if (spec->mem_base)
                pci_iounmap(codec->bus->pci, spec->mem_base);
+#endif
        kfree(spec->spec_init_verbs);
        kfree(codec->spec);
 }
index 4139aced63f8a80375f4a939a2cb2e38bcb1b41a..6df758adff84bcbc15dcc4effc3cbb98f3ea0d9f 100644 (file)
@@ -515,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
        }
 }
 
+/* get a primary headphone pin if available */
+static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
+{
+       if (spec->gen.autocfg.hp_pins[0])
+               return spec->gen.autocfg.hp_pins[0];
+       if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+               return spec->gen.autocfg.line_out_pins[0];
+       return 0;
+}
 
 /*
  * Realtek SSID verification
@@ -725,9 +734,7 @@ do_sku:
         * 15   : 1 --> enable the function "Mute internal speaker
         *              when the external headphone out jack is plugged"
         */
-       if (!spec->gen.autocfg.hp_pins[0] &&
-           !(spec->gen.autocfg.line_out_pins[0] &&
-             spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
+       if (!alc_get_hp_pin(spec)) {
                hda_nid_t nid;
                tmp = (ass >> 11) & 0x3;        /* HP to chassis */
                nid = ports[tmp];
@@ -2959,7 +2966,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
 static void alc282_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
        int coef78;
 
@@ -2996,7 +3003,7 @@ static void alc282_init(struct hda_codec *codec)
 static void alc282_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
        int coef78;
 
@@ -3074,14 +3081,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
 static void alc283_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
-       if (!spec->gen.autocfg.hp_outs) {
-               if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
-                       hp_pin = spec->gen.autocfg.line_out_pins[0];
-       }
-
        alc283_restore_default_value(codec);
 
        if (!hp_pin)
@@ -3115,14 +3117,9 @@ static void alc283_init(struct hda_codec *codec)
 static void alc283_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
-       if (!spec->gen.autocfg.hp_outs) {
-               if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
-                       hp_pin = spec->gen.autocfg.line_out_pins[0];
-       }
-
        if (!hp_pin) {
                alc269_shutup(codec);
                return;
@@ -3156,7 +3153,7 @@ static void alc283_shutup(struct hda_codec *codec)
 static void alc256_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
        if (!hp_pin)
@@ -3192,7 +3189,7 @@ static void alc256_init(struct hda_codec *codec)
 static void alc256_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
        if (!hp_pin) {
@@ -3228,7 +3225,7 @@ static void alc256_shutup(struct hda_codec *codec)
 static void alc225_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp1_pin_sense, hp2_pin_sense;
 
        if (!hp_pin)
@@ -3271,7 +3268,7 @@ static void alc225_init(struct hda_codec *codec)
 static void alc225_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp1_pin_sense, hp2_pin_sense;
 
        if (!hp_pin) {
@@ -3315,7 +3312,7 @@ static void alc225_shutup(struct hda_codec *codec)
 static void alc_default_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
        if (!hp_pin)
@@ -3344,7 +3341,7 @@ static void alc_default_init(struct hda_codec *codec)
 static void alc_default_shutup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
        if (!hp_pin) {
@@ -3376,7 +3373,7 @@ static void alc_default_shutup(struct hda_codec *codec)
 static void alc294_hp_init(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
        int i, val;
 
        if (!hp_pin)
@@ -4780,7 +4777,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
        struct alc_spec *spec = codec->spec;
 
        hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       hda_nid_t hp_pin = alc_get_hp_pin(spec);
 
        int new_headset_mode;
 
@@ -5059,7 +5056,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       int hp_pin = spec->gen.autocfg.hp_pins[0];
+       int hp_pin = alc_get_hp_pin(spec);
 
        /* Prevent pop noises when headphones are plugged in */
        snd_hda_codec_write(codec, hp_pin, 0,
@@ -5152,7 +5149,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
 
        if (action == HDA_FIXUP_ACT_PROBE) {
                int mic_pin = find_ext_mic_pin(codec);
-               int hp_pin = spec->gen.autocfg.hp_pins[0];
+               int hp_pin = alc_get_hp_pin(spec);
 
                if (snd_BUG_ON(!mic_pin || !hp_pin))
                        return;
@@ -5634,6 +5631,7 @@ enum {
        ALC294_FIXUP_ASUS_HEADSET_MIC,
        ALC294_FIXUP_ASUS_SPK,
        ALC225_FIXUP_HEADSET_JACK,
+       ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6580,6 +6578,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_jack,
        },
+       [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6758,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
index bb8372833fc227c09b08a970c002a86a2b29978e..7e65fe853ee3accb793b22cf321cbddeb81e6265 100644 (file)
@@ -1567,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case 0x20b1:  /* XMOS based devices */
        case 0x152a:  /* Thesycon devices */
        case 0x25ce:  /* Mytek devices */
+       case 0x2ab6:  /* T+A devices */
                if (fp->dsd_raw)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index 897483457bf0306b9831e3dae4f63d4643c2555e..f7261fad45c19cd7859d8d68d87aa26eb71ff128 100644 (file)
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key)
        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
 
        fdi = fopen(path, "r");
-       if (!fdi) {
-               p_err("can't open fdinfo: %s", strerror(errno));
+       if (!fdi)
                return NULL;
-       }
 
        while ((n = getline(&line, &line_n, fdi)) > 0) {
                char *value;
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key)
 
                value = strchr(line, '\t');
                if (!value || !value[1]) {
-                       p_err("malformed fdinfo!?");
                        free(line);
                        return NULL;
                }
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key)
                return line;
        }
 
-       p_err("key '%s' not found in fdinfo", key);
        free(line);
        fclose(fdi);
        return NULL;
index 2037e3dc864bae921a62bd9f9071b7a0b5f6342b..1ef1ee2280a28330925acf9aab4bd7dd8ffade3f 100644 (file)
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
        return argv + i;
 }
 
+/* on per cpu maps we must copy the provided value on all value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+       unsigned int i, n, step;
+
+       if (!map_is_per_cpu(info->type))
+               return;
+
+       n = get_possible_cpus();
+       step = round_up(info->value_size, 8);
+       for (i = 1; i < n; i++)
+               memcpy(value + i * step, value, info->value_size);
+}
+
 static int parse_elem(char **argv, struct bpf_map_info *info,
                      void *key, void *value, __u32 key_size, __u32 value_size,
                      __u32 *flags, __u32 **value_fd)
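
As the fill_per_cpu_value() comment above says, per-CPU map values are laid out from user space's point of view as one slot per possible CPU, each padded to an 8-byte stride, so a single user-supplied value has to be replicated across all slots. A self-contained sketch of that layout follows; fill_per_cpu() and the CPU count/value size are illustrative, not the bpftool code.

/* Standalone sketch of the per-CPU value layout: example numbers only. */
#include <stdio.h>
#include <string.h>

static void fill_per_cpu(void *value, unsigned int value_size,
                         unsigned int ncpus)
{
        unsigned int step = (value_size + 7) & ~7u;     /* round_up(size, 8) */
        unsigned int i;

        for (i = 1; i < ncpus; i++)
                memcpy((char *)value + i * step, value, value_size);
}

int main(void)
{
        unsigned char buf[4 * 8] = { 1, 2, 3, 4 };      /* 4 CPUs, 4-byte value */

        fill_per_cpu(buf, 4, 4);
        printf("cpu3 copy starts with %u\n", buf[3 * 8]);       /* prints 1 */
        return 0;
}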
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
                        argv = parse_bytes(argv, "value", value, value_size);
                        if (!argv)
                                return -1;
+
+                       fill_per_cpu_value(info, value);
                }
 
                return parse_elem(argv, info, key, NULL, key_size, value_size,
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
                                jsonw_uint_field(json_wtr, "owner_prog_type",
                                                 prog_type);
                }
-               if (atoi(owner_jited))
-                       jsonw_bool_field(json_wtr, "owner_jited", true);
-               else
-                       jsonw_bool_field(json_wtr, "owner_jited", false);
+               if (owner_jited)
+                       jsonw_bool_field(json_wtr, "owner_jited",
+                                        !!atoi(owner_jited));
 
                free(owner_prog_type);
                free(owner_jited);
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
                char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
                char *owner_jited = get_fdinfo(fd, "owner_jited");
 
-               printf("\n\t");
+               if (owner_prog_type || owner_jited)
+                       printf("\n\t");
                if (owner_prog_type) {
                        unsigned int prog_type = atoi(owner_prog_type);
 
@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
                        else
                                printf("owner_prog_type %d  ", prog_type);
                }
-               if (atoi(owner_jited))
-                       printf("owner jited");
-               else
-                       printf("owner not jited");
+               if (owner_jited)
+                       printf("owner%s jited",
+                              atoi(owner_jited) ? "" : " not");
 
                free(owner_prog_type);
                free(owner_jited);
index 2d1bb7d6ff5113c0014855b18e0adc4cd44472fa..b54ed82b9589d2135e0592be1035b928727c7cd7 100644 (file)
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
 
 static int prog_fd_by_tag(unsigned char *tag)
 {
-       struct bpf_prog_info info = {};
-       __u32 len = sizeof(info);
        unsigned int id = 0;
        int err;
        int fd;
 
        while (true) {
+               struct bpf_prog_info info = {};
+               __u32 len = sizeof(info);
+
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        p_err("%s", strerror(errno));
index 3040830d779763de73c9fd665ffae1c47d758ad8..84545666a09c4d05ad643aba1e5f55911fcd5a52 100644 (file)
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
 
 int main(int argc, char **argv)
 {
-       unsigned long long num_loops = 2;
+       long long num_loops = 2;
        unsigned long timedelay = 1000000;
        unsigned long buf_len = 128;
 
index f6052e70bf403950eb658cd350337162a100df5f..a55cb8b10165abcf8a07d8228b590bbc1d8a0c08 100644 (file)
@@ -268,7 +268,7 @@ struct sockaddr_in {
 #define        IN_MULTICAST(a)         IN_CLASSD(a)
 #define        IN_MULTICAST_NET        0xe0000000
 
-#define        IN_BADCLASS(a)          ((((long int) (a) ) == 0xffffffff)
+#define        IN_BADCLASS(a)          (((long int) (a) ) == (long int)0xffffffff)
 #define        IN_EXPERIMENTAL(a)      IN_BADCLASS((a))
 
 #define        IN_CLASSE(a)            ((((long int) (a)) & 0xf0000000) == 0xf0000000)
index 095aebdc5bb733d5a0615e67f60f4069ac1a0135..e6150f21267d69a8ec8e34b9d14b8bb91701bf74 100644 (file)
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache.
 The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
 you to track down the cacheline contentions.
 
-The tool is based on x86's load latency and precise store facility events
-provided by Intel CPUs. These events provide:
+On x86, the tool is based on load latency and precise store facility events
+provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
+with the thresholding feature.
+
+These events provide:
   - memory address of the access
   - type of the access (load and store details)
   - latency (in cycles) of the load access
@@ -46,7 +49,7 @@ RECORD OPTIONS
 
 -l::
 --ldlat::
-       Configure mem-loads latency.
+       Configure mem-loads latency. (x86 only)
 
 -k::
 --all-kernel::
@@ -119,11 +122,16 @@ Following perf record options are configured by default:
   -W,-d,--phys-data,--sample-cpu
 
 Unless specified otherwise with '-e' option, following events are monitored by
-default:
+default on x86:
 
   cpu/mem-loads,ldlat=30/P
   cpu/mem-stores/P
 
+and the following on PowerPC:
+
+  cpu/mem-loads/
+  cpu/mem-stores/
+
 User can pass any 'perf record' option behind '--' mark, like (to enable
 callchains and system wide monitoring):
 
index f8d2167cf3e7a2221b8688aff0f0fa55968db369..199ea0f0a6c0c41f64ed91f7ffb1ad3d402a1160 100644 (file)
@@ -82,7 +82,7 @@ RECORD OPTIONS
        Be more verbose (show counter open errors, etc)
 
 --ldlat <n>::
-       Specify desired latency for loads event.
+       Specify desired latency for loads event. (x86 only)
 
 In addition, for report all perf report options are valid, and for record
 all perf record options.
index 2e6595310420104a40fe4ee813f41ee0bc213349..ba98bd00648809f6d1b1bddf9ade824a59af3e90 100644 (file)
@@ -2,6 +2,7 @@ libperf-y += header.o
 libperf-y += sym-handling.o
 libperf-y += kvm-stat.o
 libperf-y += perf_regs.o
+libperf-y += mem-events.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
new file mode 100644 (file)
index 0000000..d08311f
--- /dev/null
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "mem-events.h"
+
+/* PowerPC does not support 'ldlat' parameter. */
+char *perf_mem_events__name(int i)
+{
+       if (i == PERF_MEM_EVENTS__LOAD)
+               return (char *) "cpu/mem-loads/";
+
+       return (char *) "cpu/mem-stores/";
+}
index ed4583128b9ce2bd17a25c105c908a38f28a8eed..b36061cd1ab86887af34ef8cc345419b9cebc238 100644 (file)
@@ -2514,19 +2514,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
 
 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
-       struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+       bool found = false;
+       struct perf_evsel *evsel, *tmp;
+       struct parse_events_error err = { .idx = 0, };
+       int ret = parse_events(evlist, "probe:vfs_getname*", &err);
 
-       if (IS_ERR(evsel))
+       if (ret)
                return false;
 
-       if (perf_evsel__field(evsel, "pathname") == NULL) {
+       evlist__for_each_entry_safe(evlist, evsel, tmp) {
+               if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+                       continue;
+
+               if (perf_evsel__field(evsel, "pathname")) {
+                       evsel->handler = trace__vfs_getname;
+                       found = true;
+                       continue;
+               }
+
+               list_del_init(&evsel->node);
+               evsel->evlist = NULL;
                perf_evsel__delete(evsel);
-               return false;
        }
 
-       evsel->handler = trace__vfs_getname;
-       perf_evlist__add(evlist, evsel);
-       return true;
+       return found;
 }
 
 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
index 44090a9a19f3b9f33b0ae8eee972941a919fca4c..e952127e4fb0b453eab36540d9dff023128edf02 100644 (file)
@@ -1,6 +1,8 @@
 #! /usr/bin/python
 # SPDX-License-Identifier: GPL-2.0
 
+from __future__ import print_function
+
 import os
 import sys
 import glob
@@ -8,7 +10,11 @@ import optparse
 import tempfile
 import logging
 import shutil
-import ConfigParser
+
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
 
 def data_equal(a, b):
     # Allow multiple values in assignment separated by '|'
@@ -100,20 +106,20 @@ class Event(dict):
     def equal(self, other):
         for t in Event.terms:
             log.debug("      [%s] %s %s" % (t, self[t], other[t]));
-            if not self.has_key(t) or not other.has_key(t):
+            if t not in self or t not in other:
                 return False
             if not data_equal(self[t], other[t]):
                 return False
         return True
 
     def optional(self):
-        if self.has_key('optional') and self['optional'] == '1':
+        if 'optional' in self and self['optional'] == '1':
             return True
         return False
 
     def diff(self, other):
         for t in Event.terms:
-            if not self.has_key(t) or not other.has_key(t):
+            if t not in self or t not in other:
                 continue
             if not data_equal(self[t], other[t]):
                 log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
@@ -134,7 +140,7 @@ class Event(dict):
 #   - expected values assignments
 class Test(object):
     def __init__(self, path, options):
-        parser = ConfigParser.SafeConfigParser()
+        parser = configparser.SafeConfigParser()
         parser.read(path)
 
         log.warning("running '%s'" % path)
@@ -193,7 +199,7 @@ class Test(object):
         return True
 
     def load_events(self, path, events):
-        parser_event = ConfigParser.SafeConfigParser()
+        parser_event = configparser.SafeConfigParser()
         parser_event.read(path)
 
         # The event record section header contains 'event' word,
@@ -207,7 +213,7 @@ class Test(object):
             # Read parent event if there's any
             if (':' in section):
                 base = section[section.index(':') + 1:]
-                parser_base = ConfigParser.SafeConfigParser()
+                parser_base = configparser.SafeConfigParser()
                 parser_base.read(self.test_dir + '/' + base)
                 base_items = parser_base.items('event')
 
@@ -322,9 +328,9 @@ def run_tests(options):
     for f in glob.glob(options.test_dir + '/' + options.test):
         try:
             Test(f, options).run()
-        except Unsup, obj:
+        except Unsup as obj:
             log.warning("unsupp  %s" % obj.getMsg())
-        except Notest, obj:
+        except Notest as obj:
             log.warning("skipped %s" % obj.getMsg())
 
 def setup_log(verbose):
@@ -363,7 +369,7 @@ def main():
     parser.add_option("-p", "--perf",
                       action="store", type="string", dest="perf")
     parser.add_option("-v", "--verbose",
-                      action="count", dest="verbose")
+                      default=0, action="count", dest="verbose")
 
     options, args = parser.parse_args()
     if args:
@@ -373,7 +379,7 @@ def main():
     setup_log(options.verbose)
 
     if not options.test_dir:
-        print 'FAILED no -d option specified'
+        print('FAILED no -d option specified')
         sys.exit(-1)
 
     if not options.test:
@@ -382,8 +388,8 @@ def main():
     try:
         run_tests(options)
 
-    except Fail, obj:
-        print "FAILED %s" % obj.getMsg();
+    except Fail as obj:
+        print("FAILED %s" % obj.getMsg())
         sys.exit(-1)
 
     sys.exit(0)
index 5f8501c68da49d56d8dee911b1760cbcb17d2d99..5cbba70bcdd0b5ff19c913ade3032f4a858dfe94 100644 (file)
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
                return -1;
        }
 
-       is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED);
+       is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
        if (should_be_signed && !is_signed) {
                pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
                         evsel->name, name, is_signed, should_be_signed);
index 89512504551b0b198a44ebe30e81d0972b86ce77..39c0004f288663a579b701a802ef3348906dda70 100644 (file)
@@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module)
        }
        PM.run(*Module);
 
-       return std::move(Buffer);
+       return Buffer;
 }
 
 }
index 93f74d8d3cdd96c97ea22354e9d38abfa518476e..42c3e5a229d24bae0719e4f2ad0e1e5e4b3770aa 100644 (file)
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
 static char mem_loads_name[100];
 static bool mem_loads_name__init;
 
-char *perf_mem_events__name(int i)
+char * __weak perf_mem_events__name(int i)
 {
        if (i == PERF_MEM_EVENTS__LOAD) {
                if (!mem_loads_name__init) {
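
The __weak annotation above turns the generic definition into a link-time fallback; the PowerPC mem-events.c added earlier in this diff provides the strong override. A minimal standalone illustration of the pattern follows; my_event_name() is a hypothetical symbol, unrelated to the perf sources.

/* Sketch of a weak fallback: if another object file in the link defines
 * a non-weak my_event_name(), that definition wins; otherwise this one
 * is used.
 */
#include <stdio.h>

__attribute__((weak)) char *my_event_name(int load)
{
        return load ? "cpu/mem-loads,ldlat=30/P" : "cpu/mem-stores/P";
}

int main(void)
{
        puts(my_event_name(1));         /* prints the fallback event name */
        return 0;
}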
index 66a84d5846c88ed912aff027943c6f8e9ff78ff2..dca7dfae69ad2b2884103aeb054b3634905e6fb8 100644 (file)
 #define EM_AARCH64     183  /* ARM 64 bit */
 #endif
 
+#ifndef ELF32_ST_VISIBILITY
+#define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
+
+/* For ELF64 the definitions are the same.  */
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
+#endif
+
+/* How to extract information held in the st_other field.  */
+#ifndef GELF_ST_VISIBILITY
+#define GELF_ST_VISIBILITY(val)        ELF64_ST_VISIBILITY (val)
+#endif
+
 typedef Elf64_Nhdr GElf_Nhdr;
 
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
@@ -87,6 +101,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
        return GELF_ST_TYPE(sym->st_info);
 }
 
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+       return GELF_ST_VISIBILITY(sym->st_other);
+}
+
 #ifndef STT_GNU_IFUNC
 #define STT_GNU_IFUNC 10
 #endif
@@ -111,7 +130,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
        return elf_sym__type(sym) == STT_NOTYPE &&
                sym->st_name != 0 &&
                sym->st_shndx != SHN_UNDEF &&
-               sym->st_shndx != SHN_ABS;
+               sym->st_shndx != SHN_ABS &&
+               elf_sym__visibility(sym) != STV_HIDDEN &&
+               elf_sym__visibility(sym) != STV_INTERNAL;
 }
 
 static bool elf_sym__filter(GElf_Sym *sym)
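
ELF stores symbol visibility in the low two bits of st_other, which is what the new macros above extract, and labels marked hidden or internal are now filtered out. A standalone decoder sketch follows; the constants mirror the ELF specification and the sample byte is made up.

/* Standalone sketch of st_other visibility decoding. */
#include <stdio.h>

#define MODEL_ST_VISIBILITY(o)  ((o) & 0x03)

enum { VIS_DEFAULT = 0, VIS_INTERNAL = 1, VIS_HIDDEN = 2, VIS_PROTECTED = 3 };

int main(void)
{
        unsigned char st_other = 0x02;          /* e.g. a hidden label */

        switch (MODEL_ST_VISIBILITY(st_other)) {
        case VIS_HIDDEN:
        case VIS_INTERNAL:
                puts("filtered out by elf_sym__is_label()");
                break;
        default:
                puts("kept");
        }
        return 0;
}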
index 1a2bd15c5b6e53be68ac9f37ee6b6cf2de582424..400ee81a304332ba308397c9540ed5829f1bee39 100644 (file)
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf
 TARGETS += efivarfs
 TARGETS += exec
 TARGETS += filesystems
+TARGETS += filesystems/binderfs
 TARGETS += firmware
 TARGETS += ftrace
 TARGETS += futex
index 315a44fa32af3353bf6eb90fd3d324121319d319..84fd6f1bf33e7fd2b38d89c9c876bd35139eb94c 100644 (file)
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
        unsigned int start, end, possible_cpus = 0;
        char buff[128];
        FILE *fp;
-       int n;
+       int len, n, i, j = 0;
 
        fp = fopen(fcpu, "r");
        if (!fp) {
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
                exit(1);
        }
 
-       while (fgets(buff, sizeof(buff), fp)) {
-               n = sscanf(buff, "%u-%u", &start, &end);
-               if (n == 0) {
-                       printf("Failed to retrieve # possible CPUs!\n");
-                       exit(1);
-               } else if (n == 1) {
-                       end = start;
+       if (!fgets(buff, sizeof(buff), fp)) {
+               printf("Failed to read %s!\n", fcpu);
+               exit(1);
+       }
+
+       len = strlen(buff);
+       for (i = 0; i <= len; i++) {
+               if (buff[i] == ',' || buff[i] == '\0') {
+                       buff[i] = '\0';
+                       n = sscanf(&buff[j], "%u-%u", &start, &end);
+                       if (n <= 0) {
+                               printf("Failed to retrieve # possible CPUs!\n");
+                               exit(1);
+                       } else if (n == 1) {
+                               end = start;
+                       }
+                       possible_cpus += end - start + 1;
+                       j = i + 1;
                }
-               possible_cpus = start == 0 ? end + 1 : 0;
-               break;
        }
+
        fclose(fp);
 
        return possible_cpus;
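The rewritten helper walks a comma-separated list of ranges ("0-3,8-11", for example) instead of assuming a single "0-N" entry. A rough user-space sketch of the same counting logic, run on a made-up input string:

    /* cpu_count_demo.c - illustration only */
    #include <stdio.h>
    #include <string.h>

    static unsigned int count_cpus(char *buff)
    {
        unsigned int start, end, possible = 0;
        int len = strlen(buff), n, i, j = 0;

        for (i = 0; i <= len; i++) {
            if (buff[i] == ',' || buff[i] == '\0') {
                buff[i] = '\0';
                n = sscanf(&buff[j], "%u-%u", &start, &end);
                if (n == 1)
                    end = start;                 /* single CPU, not a range */
                if (n >= 1)
                    possible += end - start + 1;
                j = i + 1;
            }
        }
        return possible;
    }

    int main(void)
    {
        char list[] = "0-3,8-11\n";              /* hypothetical sysfs content */
        printf("%u possible CPUs\n", count_cpus(list));   /* prints 8 */
        return 0;
    }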
index a0bd04befe87f53619cd126cc8fd65b07b22dc07..91420fa83b088e7e3ec4d63a0781bdcb40a070a9 100644 (file)
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = {
 },
 
 {
-       .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)",
+       .descr = "func proto (TYPEDEF=>FUNC_PROTO)",
        .raw_types = {
                BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
                BTF_TYPE_INT_ENC(0, 0, 0, 32, 4),               /* [2] */
-               BTF_CONST_ENC(4),                               /* [3] */
-               BTF_TYPEDEF_ENC(NAME_TBD, 5),                   /* [4] */
-               BTF_FUNC_PROTO_ENC(0, 2),                       /* [5] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 4),                   /* [3] */
+               BTF_FUNC_PROTO_ENC(0, 2),                       /* [4] */
                        BTF_FUNC_PROTO_ARG_ENC(0, 1),
                        BTF_FUNC_PROTO_ARG_ENC(0, 2),
                BTF_END_RAW,
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = {
        .key_type_id = 1,
        .value_type_id = 1,
        .max_entries = 4,
-       .btf_load_err = true,
-       .err_str = "Invalid type_id",
 },
 
 {
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore
new file mode 100644 (file)
index 0000000..8a5d9bf
--- /dev/null
@@ -0,0 +1 @@
+binderfs_test
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
new file mode 100644 (file)
index 0000000..58cb659
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += -I../../../../../usr/include/
+TEST_GEN_PROGS := binderfs_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
new file mode 100644 (file)
index 0000000..8c2ed96
--- /dev/null
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <linux/android/binder.h>
+#include <linux/android/binderfs.h>
+#include "../../kselftest.h"
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+       ssize_t ret;
+again:
+       ret = write(fd, buf, count);
+       if (ret < 0 && errno == EINTR)
+               goto again;
+
+       return ret;
+}
+
+static void write_to_file(const char *filename, const void *buf, size_t count,
+                         int allowed_errno)
+{
+       int fd, saved_errno;
+       ssize_t ret;
+
+       fd = open(filename, O_WRONLY | O_CLOEXEC);
+       if (fd < 0)
+               ksft_exit_fail_msg("%s - Failed to open file %s\n",
+                                  strerror(errno), filename);
+
+       ret = write_nointr(fd, buf, count);
+       if (ret < 0) {
+               if (allowed_errno && (errno == allowed_errno)) {
+                       close(fd);
+                       return;
+               }
+
+               goto on_error;
+       }
+
+       if ((size_t)ret != count)
+               goto on_error;
+
+       close(fd);
+       return;
+
+on_error:
+       saved_errno = errno;
+       close(fd);
+       errno = saved_errno;
+
+       if (ret < 0)
+               ksft_exit_fail_msg("%s - Failed to write to file %s\n",
+                                  strerror(errno), filename);
+
+       ksft_exit_fail_msg("Failed to write to file %s\n", filename);
+}
+
+static void change_to_userns(void)
+{
+       int ret;
+       uid_t uid;
+       gid_t gid;
+       /* {g,u}id_map files only allow a max of 4096 bytes written to them */
+       char idmap[4096];
+
+       uid = getuid();
+       gid = getgid();
+
+       ret = unshare(CLONE_NEWUSER);
+       if (ret < 0)
+               ksft_exit_fail_msg("%s - Failed to unshare user namespace\n",
+                                  strerror(errno));
+
+       write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT);
+
+       ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid);
+       if (ret < 0 || (size_t)ret >= sizeof(idmap))
+               ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n",
+                                  strerror(errno));
+
+       write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0);
+
+       ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid);
+       if (ret < 0 || (size_t)ret >= sizeof(idmap))
+               ksft_exit_fail_msg("%s - Failed to prepare gid mapping\n",
+                                  strerror(errno));
+
+       write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0);
+
+       ret = setgid(0);
+       if (ret)
+               ksft_exit_fail_msg("%s - Failed to setgid(0)\n",
+                                  strerror(errno));
+
+       ret = setuid(0);
+       if (ret)
+               ksft_exit_fail_msg("%s - Failed to setuid(0)\n",
+                                  strerror(errno));
+}
+
+static void change_to_mountns(void)
+{
+       int ret;
+
+       ret = unshare(CLONE_NEWNS);
+       if (ret < 0)
+               ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n",
+                                  strerror(errno));
+
+       ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0);
+       if (ret < 0)
+               ksft_exit_fail_msg("%s - Failed to mount / as private\n",
+                                  strerror(errno));
+}
+
+static void rmdir_protect_errno(const char *dir)
+{
+       int saved_errno = errno;
+       (void)rmdir(dir);
+       errno = saved_errno;
+}
+
+static void __do_binderfs_test(void)
+{
+       int fd, ret, saved_errno;
+       size_t len;
+       ssize_t wret;
+       bool keep = false;
+       struct binderfs_device device = { 0 };
+       struct binder_version version = { 0 };
+
+       change_to_mountns();
+
+       ret = mkdir("/dev/binderfs", 0755);
+       if (ret < 0) {
+               if (errno != EEXIST)
+                       ksft_exit_fail_msg(
+                               "%s - Failed to create binderfs mountpoint\n",
+                               strerror(errno));
+
+               keep = true;
+       }
+
+       ret = mount(NULL, "/dev/binderfs", "binder", 0, 0);
+       if (ret < 0) {
+               if (errno != ENODEV)
+                       ksft_exit_fail_msg("%s - Failed to mount binderfs\n",
+                                          strerror(errno));
+
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_skip(
+                       "The Android binderfs filesystem is not available\n");
+       }
+
+       /* binderfs mount test passed */
+       ksft_inc_pass_cnt();
+
+       memcpy(device.name, "my-binder", strlen("my-binder"));
+
+       fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
+       if (fd < 0)
+               ksft_exit_fail_msg(
+                       "%s - Failed to open binder-control device\n",
+                       strerror(errno));
+
+       ret = ioctl(fd, BINDER_CTL_ADD, &device);
+       saved_errno = errno;
+       close(fd);
+       errno = saved_errno;
+       if (ret < 0) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg(
+                       "%s - Failed to allocate new binder device\n",
+                       strerror(errno));
+       }
+
+       ksft_print_msg(
+               "Allocated new binder device with major %d, minor %d, and name %s\n",
+               device.major, device.minor, device.name);
+
+       /* binder device allocation test passed */
+       ksft_inc_pass_cnt();
+
+       fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY);
+       if (fd < 0) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg("%s - Failed to open my-binder device\n",
+                                  strerror(errno));
+       }
+
+       ret = ioctl(fd, BINDER_VERSION, &version);
+       saved_errno = errno;
+       close(fd);
+       errno = saved_errno;
+       if (ret < 0) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg(
+                       "%s - Failed to perform BINDER_VERSION request\n",
+                       strerror(errno));
+       }
+
+       ksft_print_msg("Detected binder version: %d\n",
+                      version.protocol_version);
+
+       /* binder transaction with binderfs binder device passed */
+       ksft_inc_pass_cnt();
+
+       ret = unlink("/dev/binderfs/my-binder");
+       if (ret < 0) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg("%s - Failed to delete binder device\n",
+                                  strerror(errno));
+       }
+
+       /* binder device removal passed */
+       ksft_inc_pass_cnt();
+
+       ret = unlink("/dev/binderfs/binder-control");
+       if (!ret) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg("Managed to delete binder-control device\n");
+       } else if (errno != EPERM) {
+               keep ? : rmdir_protect_errno("/dev/binderfs");
+               ksft_exit_fail_msg(
+                       "%s - Failed to delete binder-control device but exited with unexpected error code\n",
+                       strerror(errno));
+       }
+
+       /* binder-control device removal failed as expected */
+       ksft_inc_xfail_cnt();
+
+on_error:
+       ret = umount2("/dev/binderfs", MNT_DETACH);
+       keep ?: rmdir_protect_errno("/dev/binderfs");
+       if (ret < 0)
+               ksft_exit_fail_msg("%s - Failed to unmount binderfs\n",
+                                  strerror(errno));
+
+       /* binderfs unmount test passed */
+       ksft_inc_pass_cnt();
+}
+
+static void binderfs_test_privileged()
+{
+       if (geteuid() != 0)
+               ksft_print_msg(
+                       "Tests are not run as root. Skipping privileged tests\n");
+       else
+               __do_binderfs_test();
+}
+
+static void binderfs_test_unprivileged()
+{
+       change_to_userns();
+       __do_binderfs_test();
+}
+
+int main(int argc, char *argv[])
+{
+       binderfs_test_privileged();
+       binderfs_test_unprivileged();
+       ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
new file mode 100644 (file)
index 0000000..02dd6cc
--- /dev/null
@@ -0,0 +1,3 @@
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_BINDER_IPC=y
index 47ed6cef93fb8ff860f944223a176026821fbd54..c9ff2b47bd1ca3a2f70ee0683cb2b79b170c74f5 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for netfilter selftests
 
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
 
 include ../lib.mk
index 1017313e41a85e160f62a815d693836aaf1d4521..59caa8f71cd80e133b11acde1fb969077eebfecf 100644 (file)
@@ -1,2 +1,2 @@
 CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755 (executable)
index 0000000..8ec7668
--- /dev/null
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+  ip -net ns$i link set lo up
+  ip -net ns$i link set eth0 up
+  ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+  ip -net ns$i route add default via 10.0.$i.1
+  ip -net ns$i addr add dead:$i::99/64 dev eth0
+  ip -net ns$i route add default via dead:$i::1
+done
+
+bad_counter()
+{
+       local ns=$1
+       local counter=$2
+       local expect=$3
+
+       echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+       ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
+check_counters()
+{
+       ns=$1
+       local lret=0
+
+       cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
+       if [ $? -ne 0 ]; then
+               bad_counter $ns ns0in "packets 1 bytes 84"
+               lret=1
+       fi
+       cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
+       if [ $? -ne 0 ]; then
+               bad_counter $ns ns0out "packets 1 bytes 84"
+               lret=1
+       fi
+
+       expect="packets 1 bytes 104"
+       cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+       if [ $? -ne 0 ]; then
+               bad_counter $ns ns0in6 "$expect"
+               lret=1
+       fi
+       cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+       if [ $? -ne 0 ]; then
+               bad_counter $ns ns0out6 "$expect"
+               lret=1
+       fi
+
+       return $lret
+}
+
+check_ns0_counters()
+{
+       local ns=$1
+       local lret=0
+
+       cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+       if [ $? -ne 0 ]; then
+               bad_counter ns0 ns0in "packets 0 bytes 0"
+               lret=1
+       fi
+
+       cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+       if [ $? -ne 0 ]; then
+               bad_counter ns0 ns0in6 "packets 0 bytes 0"
+               lret=1
+       fi
+
+       cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+       if [ $? -ne 0 ]; then
+               bad_counter ns0 ns0out "packets 0 bytes 0"
+               lret=1
+       fi
+       cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+       if [ $? -ne 0 ]; then
+               bad_counter ns0 ns0out6 "packets 0 bytes 0"
+               lret=1
+       fi
+
+       for dir in "in" "out" ; do
+               expect="packets 1 bytes 84"
+               cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 $ns$dir "$expect"
+                       lret=1
+               fi
+
+               expect="packets 1 bytes 104"
+               cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ${ns}${dir}6 "$expect"
+                       lret=1
+               fi
+       done
+
+       return $lret
+}
+
+reset_counters()
+{
+       for i in 0 1 2;do
+               ip netns exec ns$i nft reset counters inet > /dev/null
+       done
+}
+
+test_local_dnat6()
+{
+       local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+       chain output {
+               type nat hook output priority 0; policy accept;
+               ip6 daddr dead:1::99 dnat to dead:2::99
+       }
+}
+EOF
+       if [ $? -ne 0 ]; then
+               echo "SKIP: Could not add ip6 dnat hook"
+               return $ksft_skip
+       fi
+
+       # ping netns1, expect rewrite to netns2
+       ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+       if [ $? -ne 0 ]; then
+               lret=1
+               echo "ERROR: ping6 failed"
+               return $lret
+       fi
+
+       expect="packets 0 bytes 0"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 0 count in ns1
+       expect="packets 0 bytes 0"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 1 packet in ns2
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+       ip netns exec ns0 nft flush chain ip6 nat output
+
+       return $lret
+}
+
+test_local_dnat()
+{
+       local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+       chain output {
+               type nat hook output priority 0; policy accept;
+               ip daddr 10.0.1.99 dnat to 10.0.2.99
+       }
+}
+EOF
+       # ping netns1, expect rewrite to netns2
+       ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+       if [ $? -ne 0 ]; then
+               lret=1
+               echo "ERROR: ping failed"
+               return $lret
+       fi
+
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 0 count in ns1
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 1 packet in ns2
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+       ip netns exec ns0 nft flush chain ip nat output
+
+       reset_counters
+       ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+       if [ $? -ne 0 ]; then
+               lret=1
+               echo "ERROR: ping failed"
+               return $lret
+       fi
+
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 1 count in ns1
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # expect 0 packet in ns2
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns0$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+       return $lret
+}
+
+
+test_masquerade6()
+{
+       local lret=0
+
+       ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+               lret=1
+               return $lret
+       fi
+
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               meta oif veth0 masquerade
+       }
+}
+EOF
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+               lret=1
+       fi
+
+       # ns1 should have seen packets from ns0, due to masquerade
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns0$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # ns1 should not have seen packets from ns2, due to masquerade
+       expect="packets 0 bytes 0"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       ip netns exec ns0 nft flush chain ip6 nat postrouting
+       if [ $? -ne 0 ]; then
+               echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+               lret=1
+       fi
+
+       test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+       return $lret
+}
+
+test_masquerade()
+{
+       local lret=0
+
+       ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2"
+               lret=1
+       fi
+
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+       chain postrouting {
+               type nat hook postrouting priority 0; policy accept;
+               meta oif veth0 masquerade
+       }
+}
+EOF
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+               lret=1
+       fi
+
+       # ns1 should have seen packets from ns0, due to masquerade
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns0$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # ns1 should not have seen packets from ns2, due to masquerade
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       ip netns exec ns0 nft flush chain ip nat postrouting
+       if [ $? -ne 0 ]; then
+               echo "ERROR: Could not flush nat postrouting" 1>&2
+               lret=1
+       fi
+
+       test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+       return $lret
+}
+
+test_redirect6()
+{
+       local lret=0
+
+       ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+               lret=1
+       fi
+
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+       chain prerouting {
+               type nat hook prerouting priority 0; policy accept;
+               meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+       }
+}
+EOF
+       ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+               lret=1
+       fi
+
+       # ns1 should have seen no packets from ns2, due to redirection
+       expect="packets 0 bytes 0"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # ns0 should have seen packets from ns2, due to the redirect
+       expect="packets 1 bytes 104"
+       for dir in "in6" "out6" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       ip netns exec ns0 nft delete table ip6 nat
+       if [ $? -ne 0 ]; then
+               echo "ERROR: Could not delete ip6 nat table" 1>&2
+               lret=1
+       fi
+
+       test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+       return $lret
+}
+
+test_redirect()
+{
+       local lret=0
+
+       ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+       ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2"
+               lret=1
+       fi
+
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+
+               cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns2 ns1$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+       chain prerouting {
+               type nat hook prerouting priority 0; policy accept;
+               meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+       }
+}
+EOF
+       ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+       if [ $? -ne 0 ] ; then
+               echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+               lret=1
+       fi
+
+       # ns1 should have seen no packets from ns2, due to redirection
+       expect="packets 0 bytes 0"
+       for dir in "in" "out" ; do
+
+               cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns1 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       # ns0 should have seen packets from ns2, due to the redirect
+       expect="packets 1 bytes 84"
+       for dir in "in" "out" ; do
+               cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+               if [ $? -ne 0 ]; then
+                       bad_counter ns0 ns2$dir "$expect"
+                       lret=1
+               fi
+       done
+
+       ip netns exec ns0 nft delete table ip nat
+       if [ $? -ne 0 ]; then
+               echo "ERROR: Could not delete nat table" 1>&2
+               lret=1
+       fi
+
+       test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+       return $lret
+}
+
+
+# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+       counter ns0in {}
+       counter ns1in {}
+       counter ns2in {}
+
+       counter ns0out {}
+       counter ns1out {}
+       counter ns2out {}
+
+       counter ns0in6 {}
+       counter ns1in6 {}
+       counter ns2in6 {}
+
+       counter ns0out6 {}
+       counter ns1out6 {}
+       counter ns2out6 {}
+
+       map nsincounter {
+               type ipv4_addr : counter
+               elements = { 10.0.1.1 : "ns0in",
+                            10.0.2.1 : "ns0in",
+                            10.0.1.99 : "ns1in",
+                            10.0.2.99 : "ns2in" }
+       }
+
+       map nsincounter6 {
+               type ipv6_addr : counter
+               elements = { dead:1::1 : "ns0in6",
+                            dead:2::1 : "ns0in6",
+                            dead:1::99 : "ns1in6",
+                            dead:2::99 : "ns2in6" }
+       }
+
+       map nsoutcounter {
+               type ipv4_addr : counter
+               elements = { 10.0.1.1 : "ns0out",
+                            10.0.2.1 : "ns0out",
+                            10.0.1.99: "ns1out",
+                            10.0.2.99: "ns2out" }
+       }
+
+       map nsoutcounter6 {
+               type ipv6_addr : counter
+               elements = { dead:1::1 : "ns0out6",
+                            dead:2::1 : "ns0out6",
+                            dead:1::99 : "ns1out6",
+                            dead:2::99 : "ns2out6" }
+       }
+
+       chain input {
+               type filter hook input priority 0; policy accept;
+               counter name ip saddr map @nsincounter
+               icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               counter name ip daddr map @nsoutcounter
+               icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+       }
+}
+EOF
+done
+
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+  if [ $? -ne 0 ];then
+       echo "ERROR: Could not reach other namespace(s)" 1>&2
+       ret=1
+  fi
+
+  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+  if [ $? -ne 0 ];then
+       echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+       ret=1
+  fi
+  check_counters ns$i
+  if [ $? -ne 0 ]; then
+       ret=1
+  fi
+
+  check_ns0_counters ns$i
+  if [ $? -ne 0 ]; then
+       ret=1
+  fi
+  reset_counters
+done
+
+if [ $ret -eq 0 ];then
+       echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
index 5ecea812cb6a24056ee5ab42aa585cb7f5ce15ab..585845203db89e5d2bc90722fd75bfcf595671d9 100644 (file)
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
        if (ops->init)
                ops->init(dev);
 
+       kvm_get_kvm(kvm);
        ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
        if (ret < 0) {
+               kvm_put_kvm(kvm);
                mutex_lock(&kvm->lock);
                list_del(&dev->vm_node);
                mutex_unlock(&kvm->lock);
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
                return ret;
        }
 
-       kvm_get_kvm(kvm);
        cd->fd = ret;
        return 0;
 }
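The ordering is the point of this last hunk: once anon_inode_getfd() succeeds, userspace can immediately close the new fd and run the device release path, which drops a kvm reference, so that reference has to be taken before the fd is installed and dropped again only if installation fails. A generic, non-kernel sketch of that take-before-publish pattern:

    /* refcount_demo.c - illustration only, not kernel code */
    #include <stdio.h>

    struct obj { int refcount; };

    static void obj_get(struct obj *o) { o->refcount++; }
    static void obj_put(struct obj *o) { o->refcount--; }

    /* stand-in for anon_inode_getfd(): publishing may fail */
    static int publish(struct obj *o, int fail)
    {
        (void)o;                /* a real implementation would install o behind the fd */
        return fail ? -1 : 3;   /* pretend fd 3 on success */
    }

    static int create_device(struct obj *kvm, int fail)
    {
        int fd;

        obj_get(kvm);               /* take the reference first ... */
        fd = publish(kvm, fail);    /* ... then make the object reachable */
        if (fd < 0)
            obj_put(kvm);           /* undo on the failure path */
        return fd;
    }

    int main(void)
    {
        struct obj kvm = { .refcount = 1 };

        create_device(&kvm, 1);     /* simulate a failed fd installation */
        printf("refcount after failed create: %d\n", kvm.refcount);   /* prints 1 */
        return 0;
    }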