Merge drm/drm-next into drm-intel-next-queued
author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
           Mon, 11 Mar 2019 11:09:20 +0000 (13:09 +0200)
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
           Mon, 11 Mar 2019 11:09:20 +0000 (13:09 +0200)
To facilitate merging topic/hdr-formats from Maarten.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
382 files changed:
CREDITS
Documentation/admin-guide/README.rst
Documentation/networking/dsa/dsa.txt
Documentation/networking/msg_zerocopy.rst
Documentation/networking/switchdev.txt
Documentation/process/applying-patches.rst
Documentation/translations/it_IT/admin-guide/README.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/uaccess.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/head.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/setup.c
arch/arc/lib/memcpy-archs.S
arch/arc/plat-hsdk/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/armada-xp-db.dts
arch/arm/boot/dts/armada-xp-gp.dts
arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
arch/arm/boot/dts/gemini-dlink-dir-685.dts
arch/arm/boot/dts/tegra124-nyan.dtsi
arch/arm/crypto/sha256-armv4.pl
arch/arm/crypto/sha256-core.S_shipped
arch/arm/crypto/sha512-armv4.pl
arch/arm/crypto/sha512-core.S_shipped
arch/arm/include/asm/irq.h
arch/arm/kernel/irq.c
arch/arm/kernel/smp.c
arch/arm/mm/dma-mapping.c
arch/arm/probes/kprobes/opt-arm.c
arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
arch/arm64/boot/dts/qcom/msm8998.dtsi
arch/arm64/crypto/chacha-neon-core.S
arch/arm64/include/asm/neon-intrinsics.h
arch/arm64/kernel/head.S
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/mm/kasan_init.c
arch/mips/bcm63xx/dev-enet.c
arch/mips/kernel/cmpxchg.c
arch/mips/kernel/setup.c
arch/mips/lantiq/xway/vmmc.c
arch/mips/net/ebpf_jit.c
arch/parisc/kernel/ptrace.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/s390/kvm/vsie.c
arch/sh/boot/dts/Makefile
arch/x86/include/asm/hyperv-tlfs.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/uaccess.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu.c
arch/x86/mm/extable.c
crypto/af_alg.c
drivers/base/power/runtime.c
drivers/clk/at91/at91sam9x5.c
drivers/clk/at91/sama5d2.c
drivers/clk/at91/sama5d4.c
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
drivers/cpufreq/scmi-cpufreq.c
drivers/crypto/ccree/cc_pm.h
drivers/gpio/gpio-mt7621.c
drivers/gpio/gpio-pxa.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/imx/Kconfig
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/ipuv3-plane.h
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/gpu/ipu-v3/ipu-prg.c
drivers/gpu/ipu-v3/ipu-prv.h
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/iommu/dmar.c
drivers/mailbox/bcm-flexrm-mailbox.c
drivers/mailbox/mailbox.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/queue.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/tmio_mmc.h
drivers/mmc/host/tmio_mmc_core.c
drivers/mtd/devices/powernv_flash.c
drivers/mtd/mtdcore.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/geneve.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/dp83867.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phylink.c
drivers/net/phy/realtek.c
drivers/net/phy/xilinx_gmii2rgmii.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/xen-netback/hash.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/pinctrl/qcom/pinctrl-qcs404.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-sas.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/bfa/bfad.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hptiop.c
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd_zbc.c
drivers/tee/optee/core.c
drivers/vhost/vhost.c
fs/afs/cell.c
fs/binfmt_script.c
fs/ceph/snap.c
fs/hugetlbfs/inode.c
fs/namespace.c
fs/nfs/nfs4idmap.c
fs/orangefs/file.c
fs/proc/base.c
include/drm/drm_drv.h
include/keys/request_key_auth-type.h [new file with mode: 0644]
include/keys/user-type.h
include/linux/key-type.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/phy.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/virtio_net.h
include/net/icmp.h
include/net/ip.h
include/net/phonet/pep.h
include/net/xfrm.h
include/uapi/drm/amdgpu_drm.h
include/video/imx-ipu-v3.h
init/initramfs.c
kernel/bpf/lpm_trie.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/sched/psi.c
kernel/trace/trace.c
kernel/trace/trace_kprobe.c
lib/Kconfig.kasan
lib/assoc_array.c
mm/debug.c
mm/hugetlb.c
mm/kasan/Makefile
mm/kasan/common.c
mm/kasan/tags.c
mm/kmemleak.c
mm/maccess.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/swap.c
mm/util.c
net/bpf/test_run.c
net/bridge/br_multicast.c
net/ceph/messenger.c
net/compat.c
net/core/dev.c
net/core/filter.c
net/core/skbuff.c
net/dsa/dsa2.c
net/dsa/port.c
net/ipv4/cipso_ipv4.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/icmp.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_options.c
net/ipv4/netlink.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/esp6.c
net/ipv6/fou6.c
net/ipv6/ip6_gre.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/udp.c
net/ipv6/xfrm6_tunnel.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/main.c
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mpls/af_mpls.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_tables_api.c
net/netlabel/netlabel_kapi.c
net/nfc/llcp_commands.c
net/nfc/llcp_core.c
net/phonet/pep.c
net/sched/act_ipt.c
net/sched/act_skbedit.c
net/sched/act_tunnel_key.c
net/sched/sch_netem.c
net/sctp/chunk.c
net/sctp/transport.c
net/smc/smc.h
net/socket.c
net/tipc/socket.c
net/unix/af_unix.c
net/unix/diag.c
net/x25/af_x25.c
net/xdp/xdp_umem.c
net/xdp/xsk.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/Makefile.kasan
scripts/kallsyms.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/lsm_audit.c
sound/pci/hda/patch_realtek.c
sound/soc/generic/simple-card.c
sound/soc/samsung/i2s.c
sound/soc/soc-topology.c
tools/testing/selftests/bpf/test_lpm_map.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/net/udpgso_bench_rx.c
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index e818eb6a3e71a485ad709a7e351dee9d763f35df..0175098d47769230b935f61c43fe9a614b0f7b08 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
 
 N: Helge Deller
 E: deller@gmx.de
-E: hdeller@redhat.de
-D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver
-S: Schimmelsrain 1
-S: D-69231 Rauenberg
+W: http://www.parisc-linux.org/
+D: PA-RISC Linux architecture maintainer
+D: LASI-, ASP-, WAX-, LCD/LED-driver
 S: Germany
 
 N: Jean Delvare
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape
 S: South Africa
 
 N: Grant Grundler
-E: grundler@parisc-linux.org
+E: grantgrundler@gmail.com
 W: http://obmouse.sourceforge.net/
 W: http://www.parisc-linux.org/
 D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206
 S: USA
 
 N: Kyle McMartin
-E: kyle@parisc-linux.org
+E: kyle@mcmartin.ca
 D: Linux/PARISC hacker
 D: AD1889 sound driver
 S: Ottawa, Canada
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct
 S: Cupertino, CA 95014
 S: USA
 
-N: Thibaut Varene
-E: T-Bone@parisc-linux.org
-W: http://www.parisc-linux.org/~varenet/
-P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C  FA2F 1E32 C3DA B7D2 F063
+N: Thibaut Varène
+E: hacks+kernel@slashdirt.org
+W: http://hacks.slashdirt.org/
 D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
 D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there
 D: AD1889 sound driver
-S: Paris, France
+S: France
 
 N: Heikki Vatiainen
 E: hessu@cs.tut.fi
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index 0797eec76be139e328f47e5d91a62a0b2ee30d21..47e577264198d086fe5ba62f01aa1ad4b97de510 100644 (file)
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
 .. _readme:
 
-Linux kernel release 4.x <http://kernel.org/>
+Linux kernel release 5.x <http://kernel.org/>
 =============================================
 
-These are the release notes for Linux version 4.  Read them carefully,
+These are the release notes for Linux version 5.  Read them carefully,
 as they tell you what this is all about, explain how to install the
 kernel, and what to do if something goes wrong.
 
@@ -63,7 +63,7 @@ Installing the kernel source
    directory where you have permissions (e.g. your home directory) and
    unpack it::
 
-     xz -cd linux-4.X.tar.xz | tar xvf -
+     xz -cd linux-5.x.tar.xz | tar xvf -
 
    Replace "X" with the version number of the latest kernel.
 
@@ -72,26 +72,26 @@ Installing the kernel source
    files.  They should match the library, and not get messed up by
    whatever the kernel-du-jour happens to be.
 
- - You can also upgrade between 4.x releases by patching.  Patches are
+ - You can also upgrade between 5.x releases by patching.  Patches are
    distributed in the xz format.  To install by patching, get all the
    newer patch files, enter the top level directory of the kernel source
-   (linux-4.X) and execute::
+   (linux-5.x) and execute::
 
-     xz -cd ../patch-4.x.xz | patch -p1
+     xz -cd ../patch-5.x.xz | patch -p1
 
-   Replace "x" for all versions bigger than the version "X" of your current
+   Replace "x" for all versions bigger than the version "x" of your current
   source tree, **in order**, and you should be ok.  You may want to remove
    the backup files (some-file-name~ or some-file-name.orig), and make sure
    that there are no failed patches (some-file-name# or some-file-name.rej).
    If there are, either you or I have made a mistake.
 
-   Unlike patches for the 4.x kernels, patches for the 4.x.y kernels
+   Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
    (also known as the -stable kernels) are not incremental but instead apply
-   directly to the base 4.x kernel.  For example, if your base kernel is 4.0
-   and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1
-   and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and
-   want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is,
-   patch -R) **before** applying the 4.0.3 patch. You can read more on this in
+   directly to the base 5.x kernel.  For example, if your base kernel is 5.0
+   and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
+   and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
+   want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
+   patch -R) **before** applying the 5.0.3 patch. You can read more on this in
    :ref:`Documentation/process/applying-patches.rst <applying_patches>`.
 
    Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
 Software requirements
 ---------------------
 
-   Compiling and running the 4.x kernels requires up-to-date
+   Compiling and running the 5.x kernels requires up-to-date
    versions of various software packages.  Consult
    :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
    required and how to get updates for these packages.  Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
    place for the output files (including .config).
    Example::
 
-     kernel source code: /usr/src/linux-4.X
+     kernel source code: /usr/src/linux-5.x
      build directory:    /home/name/build/kernel
 
    To configure and build the kernel, use::
 
-     cd /usr/src/linux-4.X
+     cd /usr/src/linux-5.x
      make O=/home/name/build/kernel menuconfig
      make O=/home/name/build/kernel
      sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 25170ad7d25b4b58774f939743ddc3ef9d58d3d9..101f2b2c69ad14d003d674ed1f6d0de1995bcf93 100644 (file)
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -533,16 +533,12 @@ Bridge VLAN filtering
   function that the driver has to call for each VLAN the given port is a member
   of. A switchdev object is used to carry the VID and bridge flags.
 
-- port_fdb_prepare: bridge layer function invoked when the bridge prepares the
-  installation of a Forwarding Database entry. If the operation is not
-  supported, this function should return -EOPNOTSUPP to inform the bridge code
-  to fallback to a software implementation. No hardware setup must be done in
-  this function. See port_fdb_add for this and details.
-
 - port_fdb_add: bridge layer function invoked when the bridge wants to install a
   Forwarding Database entry, the switch hardware should be programmed with the
   specified address in the specified VLAN Id in the forwarding database
-  associated with this VLAN ID
+  associated with this VLAN ID. If the operation is not supported, this
+  function should return -EOPNOTSUPP to inform the bridge code to fallback to
+  a software implementation.
 
 Note: VLAN ID 0 corresponds to the port private database, which, in the context
 of DSA, would be the its port-based VLAN, used by the associated bridge device.
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index fe46d4867e2dbfa4cde05cc0b900c157d99b67d9..18c1415e7bfad8f6e6e9b03febaf47f83a0f9915 100644 (file)
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -7,7 +7,7 @@ Intro
 =====
 
 The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
-The feature is currently implemented for TCP sockets.
+The feature is currently implemented for TCP and UDP sockets.
 
 
 Opportunity and Caveats
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 82236a17b5e65198be004d2cdd6a7c5bd8a9b7d4..97b7ca8b9b86bfd192753e71795ee6f8e9a4ef24 100644 (file)
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -92,11 +92,11 @@ device.
 Switch ID
 ^^^^^^^^^
 
-The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
-physical ID for each port of a switch.  The ID must be unique between switches
-on the same system.  The ID does not need to be unique between switches on
-different systems.
+The switchdev driver must implement the net_device operation
+ndo_get_port_parent_id for each port netdev, returning the same physical ID for
+each port of a switch. The ID must be unique between switches on the same
+system. The ID does not need to be unique between switches on different
+systems.
 
 The switch ID is used to locate ports on a switch and to know if aggregated
 ports belong to the same switch.
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index dc2ddc3450442b5b8632b87ae21d8c6d43f46d10..fbb9297e6360ddbf022131f88250e952708a4f33 100644 (file)
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to
 generate a patch representing the differences between two patches and then
 apply the result.
 
-This will let you move from something like 4.7.2 to 4.7.3 in a single
+This will let you move from something like 5.7.2 to 5.7.3 in a single
 step. The -z flag to interdiff will even let you feed it patches in gzip or
 bzip2 compressed form directly without the use of zcat or bzcat or manual
 decompression.
 
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step::
+Here's how you'd go from 5.7.2 to 5.7.3 in a single step::
 
-       interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1
+       interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1
 
 Although interdiff may save you a step or two you are generally advised to
 do the additional steps since interdiff can get things wrong in some cases.
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/
 Most recent patches are linked from the front page, but they also have
 specific homes.
 
-The 4.x.y (-stable) and 4.x patches live at
+The 5.x.y (-stable) and 5.x patches live at
 
-       https://www.kernel.org/pub/linux/kernel/v4.x/
+       https://www.kernel.org/pub/linux/kernel/v5.x/
 
-The -rc patches live at
+The -rc patches are not stored on the webserver but are generated on
+demand from git tags such as
 
-       https://www.kernel.org/pub/linux/kernel/v4.x/testing/
+       https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0
 
+The stable -rc patches live at
 
-The 4.x kernels
+       https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/
+
+
+The 5.x kernels
 ===============
 
 These are the base stable releases released by Linus. The highest numbered
 release is the most recent.
 
 If regressions or other serious flaws are found, then a -stable fix patch
-will be released (see below) on top of this base. Once a new 4.x base
+will be released (see below) on top of this base. Once a new 5.x base
 kernel is released, a patch is made available that is a delta between the
-previous 4.x kernel and the new one.
+previous 5.x kernel and the new one.
 
-To apply a patch moving from 4.6 to 4.7, you'd do the following (note
-that such patches do **NOT** apply on top of 4.x.y kernels but on top of the
-base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to
-first revert the 4.x.y patch).
+To apply a patch moving from 5.6 to 5.7, you'd do the following (note
+that such patches do **NOT** apply on top of 5.x.y kernels but on top of the
+base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to
+first revert the 5.x.y patch).
 
 Here are some examples::
 
-       # moving from 4.6 to 4.7
+       # moving from 5.6 to 5.7
 
-       $ cd ~/linux-4.6                # change to kernel source dir
-       $ patch -p1 < ../patch-4.7      # apply the 4.7 patch
+       $ cd ~/linux-5.6                # change to kernel source dir
+       $ patch -p1 < ../patch-5.7      # apply the 5.7 patch
        $ cd ..
-       $ mv linux-4.6 linux-4.7        # rename source dir
+       $ mv linux-5.6 linux-5.7        # rename source dir
 
-       # moving from 4.6.1 to 4.7
+       # moving from 5.6.1 to 5.7
 
-       $ cd ~/linux-4.6.1              # change to kernel source dir
-       $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch
-                                       # source dir is now 4.6
-       $ patch -p1 < ../patch-4.7      # apply new 4.7 patch
+       $ cd ~/linux-5.6.1              # change to kernel source dir
+       $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch
+                                       # source dir is now 5.6
+       $ patch -p1 < ../patch-5.7      # apply new 5.7 patch
        $ cd ..
-       $ mv linux-4.6.1 linux-4.7      # rename source dir
+       $ mv linux-5.6.1 linux-5.7      # rename source dir
 
 
-The 4.x.y kernels
+The 5.x.y kernels
 =================
 
 Kernels with 3-digit versions are -stable kernels. They contain small(ish)
 critical fixes for security problems or significant regressions discovered
-in a given 4.x kernel.
+in a given 5.x kernel.
 
 This is the recommended branch for users who want the most recent stable
 kernel and are not interested in helping test development/experimental
 versions.
 
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is
+If no 5.x.y kernel is available, then the highest numbered 5.x kernel is
 the current stable kernel.
 
 .. note::
@@ -308,23 +313,23 @@ the current stable kernel.
  The -stable team usually do make incremental patches available as well
  as patches against the latest mainline release, but I only cover the
  non-incremental ones below. The incremental ones can be found at
- https://www.kernel.org/pub/linux/kernel/v4.x/incr/
+ https://www.kernel.org/pub/linux/kernel/v5.x/incr/
 
-These patches are not incremental, meaning that for example the 4.7.3
-patch does not apply on top of the 4.7.2 kernel source, but rather on top
-of the base 4.7 kernel source.
+These patches are not incremental, meaning that for example the 5.7.3
+patch does not apply on top of the 5.7.2 kernel source, but rather on top
+of the base 5.7 kernel source.
 
-So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel
-source you have to first back out the 4.7.2 patch (so you are left with a
-base 4.7 kernel source) and then apply the new 4.7.3 patch.
+So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel
+source you have to first back out the 5.7.2 patch (so you are left with a
+base 5.7 kernel source) and then apply the new 5.7.3 patch.
 
 Here's a small example::
 
-       $ cd ~/linux-4.7.2              # change to the kernel source dir
-       $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch
-       $ patch -p1 < ../patch-4.7.3    # apply the new 4.7.3 patch
+       $ cd ~/linux-5.7.2              # change to the kernel source dir
+       $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch
+       $ patch -p1 < ../patch-5.7.3    # apply the new 5.7.3 patch
        $ cd ..
-       $ mv linux-4.7.2 linux-4.7.3    # rename the kernel source dir
+       $ mv linux-5.7.2 linux-5.7.3    # rename the kernel source dir
 
 The -rc kernels
 ===============
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing
 development kernels but do not want to run some of the really experimental
 stuff (such people should see the sections about -next and -mm kernels below).
 
-The -rc patches are not incremental, they apply to a base 4.x kernel, just
-like the 4.x.y patches described above. The kernel version before the -rcN
+The -rc patches are not incremental, they apply to a base 5.x kernel, just
+like the 5.x.y patches described above. The kernel version before the -rcN
 suffix denotes the version of the kernel that this -rc kernel will eventually
 turn into.
 
-So, 4.8-rc5 means that this is the fifth release candidate for the 4.8
-kernel and the patch should be applied on top of the 4.7 kernel source.
+So, 5.8-rc5 means that this is the fifth release candidate for the 5.8
+kernel and the patch should be applied on top of the 5.7 kernel source.
 
 Here are 3 examples of how to apply these patches::
 
-       # first an example of moving from 4.7 to 4.8-rc3
+       # first an example of moving from 5.7 to 5.8-rc3
 
-       $ cd ~/linux-4.7                        # change to the 4.7 source dir
-       $ patch -p1 < ../patch-4.8-rc3          # apply the 4.8-rc3 patch
+       $ cd ~/linux-5.7                        # change to the 5.7 source dir
+       $ patch -p1 < ../patch-5.8-rc3          # apply the 5.8-rc3 patch
        $ cd ..
-       $ mv linux-4.7 linux-4.8-rc3            # rename the source dir
+       $ mv linux-5.7 linux-5.8-rc3            # rename the source dir
 
-       # now let's move from 4.8-rc3 to 4.8-rc5
+       # now let's move from 5.8-rc3 to 5.8-rc5
 
-       $ cd ~/linux-4.8-rc3                    # change to the 4.8-rc3 dir
-       $ patch -p1 -R < ../patch-4.8-rc3       # revert the 4.8-rc3 patch
-       $ patch -p1 < ../patch-4.8-rc5          # apply the new 4.8-rc5 patch
+       $ cd ~/linux-5.8-rc3                    # change to the 5.8-rc3 dir
+       $ patch -p1 -R < ../patch-5.8-rc3       # revert the 5.8-rc3 patch
+       $ patch -p1 < ../patch-5.8-rc5          # apply the new 5.8-rc5 patch
        $ cd ..
-       $ mv linux-4.8-rc3 linux-4.8-rc5        # rename the source dir
+       $ mv linux-5.8-rc3 linux-5.8-rc5        # rename the source dir
 
-       # finally let's try and move from 4.7.3 to 4.8-rc5
+       # finally let's try and move from 5.7.3 to 5.8-rc5
 
-       $ cd ~/linux-4.7.3                      # change to the kernel source dir
-       $ patch -p1 -R < ../patch-4.7.3         # revert the 4.7.3 patch
-       $ patch -p1 < ../patch-4.8-rc5          # apply new 4.8-rc5 patch
+       $ cd ~/linux-5.7.3                      # change to the kernel source dir
+       $ patch -p1 -R < ../patch-5.7.3         # revert the 5.7.3 patch
+       $ patch -p1 < ../patch-5.8-rc5          # apply new 5.8-rc5 patch
        $ cd ..
-       $ mv linux-4.7.3 linux-4.8-rc5          # rename the kernel source dir
+       $ mv linux-5.7.3 linux-5.8-rc5          # rename the kernel source dir
 
 
 The -mm patches and the linux-next tree
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst
index 80f5ffc94a9e6287dba8ab8c53dad2b7889f3b5f..b371668178426c947a7b2a936a4ea8f9c466255e 100644 (file)
--- a/Documentation/translations/it_IT/admin-guide/README.rst
+++ b/Documentation/translations/it_IT/admin-guide/README.rst
@@ -4,7 +4,7 @@
 
 .. _it_readme:
 
-Rilascio del kernel Linux  4.x <http://kernel.org/>
+Rilascio del kernel Linux  5.x <http://kernel.org/>
 ===================================================
 
 .. warning::
diff --git a/MAINTAINERS b/MAINTAINERS
index 35e6357f9d309c243659034a458c613373407883..4f463de6e721edc2e2ddfe645bc594b686037c83 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -409,8 +409,7 @@ F:  drivers/platform/x86/wmi.c
 F:     include/uapi/linux/wmi.h
 
 AD1889 ALSA SOUND DRIVER
-M:     Thibaut Varene <T-Bone@parisc-linux.org>
-W:     http://wiki.parisc-linux.org/AD1889
+W:     https://parisc.wiki.kernel.org/index.php/AD1889
 L:     linux-parisc@vger.kernel.org
 S:     Maintained
 F:     sound/pci/ad1889.*
@@ -2865,7 +2864,7 @@ R:        Martin KaFai Lau <kafai@fb.com>
 R:     Song Liu <songliubraving@fb.com>
 R:     Yonghong Song <yhs@fb.com>
 L:     netdev@vger.kernel.org
-L:     linux-kernel@vger.kernel.org
+L:     bpf@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
 Q:     https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
@@ -2895,6 +2894,7 @@ N:        bpf
 BPF JIT for ARM
 M:     Shubham Bansal <illusionist.neo@gmail.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/arm/net/
 
@@ -2903,18 +2903,21 @@ M:      Daniel Borkmann <daniel@iogearbox.net>
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Zi Shen Lim <zlim.lnx@gmail.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Supported
 F:     arch/arm64/net/
 
 BPF JIT for MIPS (32-BIT AND 64-BIT)
 M:     Paul Burton <paul.burton@mips.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/mips/net/
 
 BPF JIT for NFP NICs
 M:     Jakub Kicinski <jakub.kicinski@netronome.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/netronome/nfp/bpf/
 
@@ -2922,6 +2925,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT)
 M:     Naveen N. Rao <naveen.n.rao@linux.ibm.com>
 M:     Sandipan Das <sandipan@linux.ibm.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/powerpc/net/
 
@@ -2929,6 +2933,7 @@ BPF JIT for S390
 M:     Martin Schwidefsky <schwidefsky@de.ibm.com>
 M:     Heiko Carstens <heiko.carstens@de.ibm.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/s390/net/
 X:     arch/s390/net/pnet.c
@@ -2936,12 +2941,14 @@ X:      arch/s390/net/pnet.c
 BPF JIT for SPARC (32-BIT AND 64-BIT)
 M:     David S. Miller <davem@davemloft.net>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/sparc/net/
 
 BPF JIT for X86 32-BIT
 M:     Wang YanQing <udknight@gmail.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/x86/net/bpf_jit_comp32.c
 
@@ -2949,6 +2956,7 @@ BPF JIT for X86 64-BIT
 M:     Alexei Starovoitov <ast@kernel.org>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Supported
 F:     arch/x86/net/
 X:     arch/x86/net/bpf_jit_comp32.c
@@ -3403,9 +3411,8 @@ F:        Documentation/media/v4l-drivers/cafe_ccic*
 F:     drivers/media/platform/marvell-ccic/
 
 CAIF NETWORK LAYER
-M:     Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Orphan
 F:     Documentation/networking/caif/
 F:     drivers/net/caif/
 F:     include/uapi/linux/caif/
@@ -8524,6 +8531,7 @@ L7 BPF FRAMEWORK
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     include/linux/skmsg.h
 F:     net/core/skmsg.c
@@ -11525,7 +11533,7 @@ F:      Documentation/blockdev/paride.txt
 F:     drivers/block/paride/
 
 PARISC ARCHITECTURE
-M:     "James E.J. Bottomley" <jejb@parisc-linux.org>
+M:     "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
 M:     Helge Deller <deller@gmx.de>
 L:     linux-parisc@vger.kernel.org
 W:     http://www.parisc-linux.org/
@@ -16751,6 +16759,7 @@ M:      Jesper Dangaard Brouer <hawk@kernel.org>
 M:     John Fastabend <john.fastabend@gmail.com>
 L:     netdev@vger.kernel.org
 L:     xdp-newbies@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Supported
 F:     net/core/xdp.c
 F:     include/net/xdp.h
@@ -16764,6 +16773,7 @@ XDP SOCKETS (AF_XDP)
 M:     Björn Töpel <bjorn.topel@intel.com>
 M:     Magnus Karlsson <magnus.karlsson@intel.com>
 L:     netdev@vger.kernel.org
+L:     bpf@vger.kernel.org
 S:     Maintained
 F:     kernel/bpf/xskmap.c
 F:     net/xdp/
diff --git a/Makefile b/Makefile
index 96c5335e7ee4c06512c0d295facc12377bedd16a..d5713e7b1e5069fc69888880c4373818827710ac 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Shy Crocodile
 
 # *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 376366a7db81c8c979628d22c37a1028fcc12fed..d750b302d5ab7c1729ce4967bc0bcc4578564eb7 100644 (file)
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -191,7 +191,6 @@ config NR_CPUS
 
 config ARC_SMP_HALT_ON_RESET
        bool "Enable Halt-on-reset boot mode"
-       default y if ARC_UBOOT_SUPPORT
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS
          (also referred to as r58:r59). These can also be used by gcc as GPR so
          kernel needs to save/restore per process
 
+config ARC_IRQ_NO_AUTOSAVE
+       bool "Disable hardware autosave regfile on interrupts"
+       default n
+       help
+         On HS cores, taken interrupt auto saves the regfile on stack.
+         This is programmable and can be optionally disabled in which case
+         software INTERRUPT_PROLOGUE/EPILGUE do the needed work
+
 endif  # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA
 
 endif
 
-config ARC_UBOOT_SUPPORT
-       bool "Support uboot arg Handling"
-       help
-         ARC Linux by default checks for uboot provided args as pointers to
-         external cmdline or DTB. This however breaks in absence of uboot,
-         when booting from Metaware debugger directly, as the registers are
-         not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
-         registers look like uboot args to kernel which then chokes.
-         So only enable the uboot arg checking/processing if users are sure
-         of uboot being in play.
-
 config ARC_BUILTIN_DTB_NAME
        string "Built in DTB"
        help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060e7c90a2cbba081a46f87ab607aee1d22e..621f59407d7693057f642d64cfe31dbdb7cd7d9d 100644 (file)
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
 # CONFIG_ARC_HAS_LLSC is not set
 CONFIG_ARC_KVADDR_SIZE=402
 CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_PREEMPT=y
 CONFIG_NET=y
 CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e9c602fa2736cfc0d6fdd439b07a11105b..e447ace6fa1cab14f00f6ebadb7b15d2812c616a 100644 (file)
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c54b032d2a84510737272cacbe1ec89b1c..c82cdb10aaf4fba577b43188809a395298ee3c5e 100644 (file)
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@ CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
 CONFIG_NET=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index f1b86cef09057ace18085cb1cb3f18b3be274852..a27eafdc82602f6856b00c5fbd82db72c33cf238 100644 (file)
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 {
 #endif
 };
 
+struct bcr_uarch_build_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       unsigned int pad:8, prod:8, maj:8, min:8;
+#else
+       unsigned int min:8, maj:8, prod:8, pad:8;
+#endif
+};
+
 struct bcr_mpy {
 #ifdef CONFIG_CPU_BIG_ENDIAN
        unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index f393b663413e49ab38bf0d4070cb7ea9f39bcfd1..2ad77fb43639cd89fe2a00033790d565a2ff1935 100644 (file)
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
 #define cache_line_size()      SMP_CACHE_BYTES
 #define ARCH_DMA_MINALIGN      SMP_CACHE_BYTES
 
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN     8
+#endif
+
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 309f4e6721b3e22829847f88a4da884fdc9edf93..225e7df2d8ed8f71932ea5abccaa3390a624c16d 100644 (file)
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+       st.as   r9, [sp, -10]   ; save r9 in it's final stack slot
+       sub     sp, sp, 12      ; skip JLI, LDI, EI
+
+       PUSH    lp_count
+       PUSHAX  lp_start
+       PUSHAX  lp_end
+       PUSH    blink
+
+       PUSH    r11
+       PUSH    r10
+
+       sub     sp, sp, 4       ; skip r9
+
+       PUSH    r8
+       PUSH    r7
+       PUSH    r6
+       PUSH    r5
+       PUSH    r4
+       PUSH    r3
+       PUSH    r2
+       PUSH    r1
+       PUSH    r0
+.endif
+#endif
+
 #ifdef CONFIG_ARC_HAS_ACCL_REGS
        PUSH    r59
        PUSH    r58
        POP     r59
 #endif
 
+#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
+.ifnc \called_from, exception
+       POP     r0
+       POP     r1
+       POP     r2
+       POP     r3
+       POP     r4
+       POP     r5
+       POP     r6
+       POP     r7
+       POP     r8
+       POP     r9
+       POP     r10
+       POP     r11
+
+       POP     blink
+       POPAX   lp_end
+       POPAX   lp_start
+
+       POP     r9
+       mov     lp_count, r9
+
+       add     sp, sp, 12      ; skip JLI, LDI, EI
+       ld.as   r9, [sp, -10]   ; reload r9 which got clobbered
+.endif
+#endif
+
 .endm
 
 /*------------------------------------------------------------------------*/
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index c9173c02081c0c3c81e136e3ae226562f5505b8e..eabc3efa6c6ddf9ba97a3f1ca7cd7379d144e2d9 100644 (file)
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
                */
                  "=&r" (tmp), "+r" (to), "+r" (from)
                :
-               : "lp_count", "lp_start", "lp_end", "memory");
+               : "lp_count", "memory");
 
                return n;
        }
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
                 */
                  "=&r" (tmp), "+r" (to), "+r" (from)
                :
-               : "lp_count", "lp_start", "lp_end", "memory");
+               : "lp_count", "memory");
 
                return n;
        }
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
        "       .previous                       \n"
        : "+r"(d_char), "+r"(res)
        : "i"(0)
-       : "lp_count", "lp_start", "lp_end", "memory");
+       : "lp_count", "memory");
 
        return res;
 }
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
        "       .previous                       \n"
        : "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
        : "g"(-EFAULT), "r"(count)
-       : "lp_count", "lp_start", "lp_end", "memory");
+       : "lp_count", "memory");
 
        return res;
 }
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cc558a25b8fa690d1c72afed97f80161e4167db6..562089d62d9d68cf6fd0be3e9c1e607dfb1decc5 100644 (file)
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -209,7 +209,9 @@ restore_regs:
 ;####### Return from Intr #######
 
 debug_marker_l1:
-       bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+       ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot
+       btst    r0, STATUS_DE_BIT               ; Z flag set if bit clear
+       bnz     .Lintr_ret_to_delay_slot        ; branch if STATUS_DE_BIT set
 
 .Lisr_ret_fast_path:
        ; Handle special case #1: (Entry via Exception, Return via IRQ)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25a15cca8ebd334402848d98aa22f07b8bf..30e090625916160acb23df6bfa44e86bcad7192f 100644 (file)
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
 #include <asm/entry.h>
 #include <asm/arcregs.h>
 #include <asm/cache.h>
+#include <asm/irqflags.h>
 
 .macro CPU_EARLY_SETUP
 
        sr      r5, [ARC_REG_DC_CTRL]
 
 1:
+
+#ifdef CONFIG_ISA_ARCV2
+       ; Unaligned access is disabled at reset, so re-enable early as
+       ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+       ; by default
+       lr      r5, [status32]
+       bset    r5, r5, STATUS_AD_BIT
+       kflag   r5
+#endif
 .endm
 
        .section .init.text, "ax",@progbits
@@ -90,15 +100,13 @@ ENTRY(stext)
        st.ab   0, [r5, 4]
 1:
 
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
-       ;    r1 = magic number (board identity, unused as of now
+       ;    r1 = magic number (always zero as of now)
        ;    r2 = pointer to uboot provided cmdline or external DTB in mem
-       ; These are handled later in setup_arch()
+       ; These are handled later in handle_uboot_args()
        st      r0, [@uboot_tag]
        st      r2, [@uboot_arg]
-#endif
 
        ; setup "current" tsk and optionally cache it in dedicated r25
        mov     r9, @init_task
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 067ea362fb3efc3bc3a9217aaf197763eafb275e..cf18b3e5a934d34c684edcc7aa84533a10f932bf 100644 (file)
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -49,11 +49,13 @@ void arc_init_IRQ(void)
 
        *(unsigned int *)&ictrl = 0;
 
+#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
        ictrl.save_nr_gpr_pairs = 6;    /* r0 to r11 (r12 saved manually) */
        ictrl.save_blink = 1;
        ictrl.save_lp_regs = 1;         /* LP_COUNT, LP_START, LP_END */
        ictrl.save_u_to_u = 0;          /* user ctxt saved on kernel stack */
        ictrl.save_idx_regs = 1;        /* JLI, LDI, EI */
+#endif
 
        WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index feb90093e6b1354eaf60064d7d961c736ded3108..7b2340996cf80fc4ddc382c55d86acbe37d49bf5 100644 (file)
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void)
                cpu->bpu.ret_stk = 4 << bpu.rse;
 
                if (cpu->core.family >= 0x54) {
-                       unsigned int exec_ctrl;
 
-                       READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-                       cpu->extn.dual_enb = !(exec_ctrl & 1);
+                       struct bcr_uarch_build_arcv2 uarch;
 
-                       /* dual issue always present for this core */
-                       cpu->extn.dual = 1;
+                       /*
+                        * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
+                        * dual issue only (HS4x). But next uarch rev (1:0)
+                        * allows it be configured for single issue (HS3x)
+                        * Ensure we fiddle with dual issue only on HS4x
+                        */
+                       READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
+
+                       if (uarch.prod == 4) {
+                               unsigned int exec_ctrl;
+
+                               /* dual issue hardware always present */
+                               cpu->extn.dual = 1;
+
+                               READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+
+                               /* dual issue hardware enabled ? */
+                               cpu->extn.dual_enb = !(exec_ctrl & 1);
+
+                       }
                }
        }
 
        READ_BCR(ARC_REG_AP_BCR, ap);
        if (ap.ver) {
                cpu->extn.ap_num = 2 << ap.num;
-               cpu->extn.ap_full = !!ap.min;
+               cpu->extn.ap_full = !ap.min;
        }
 
        READ_BCR(ARC_REG_SMART_BCR, bcr);
@@ -462,43 +478,78 @@ void setup_processor(void)
        arc_chk_core_config();
 }
 
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
 {
-       if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
-               return 1;
-       return 0;
+       /*
+        * Check that it is a untranslated address (although MMU is not enabled
+        * yet, it being a high address ensures this is not by fluke)
+        */
+       if (addr < PAGE_OFFSET)
+               return true;
+
+       /* Check that address doesn't clobber resident kernel image */
+       return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
 }
 
-void __init setup_arch(char **cmdline_p)
+#define IGNORE_ARGS            "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE         0
+#define UBOOT_TAG_CMDLINE      1
+#define UBOOT_TAG_DTB          2
+
+void __init handle_uboot_args(void)
 {
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
-       /* make sure that uboot passed pointer to cmdline/dtb is valid */
-       if (uboot_tag && is_kernel((unsigned long)uboot_arg))
-               panic("Invalid uboot arg\n");
-
-       /* See if u-boot passed an external Device Tree blob */
-       machine_desc = setup_machine_fdt(uboot_arg);    /* uboot_tag == 2 */
-       if (!machine_desc)
-#endif
-       {
-               /* No, so try the embedded one */
+       bool use_embedded_dtb = true;
+       bool append_cmdline = false;
+
+       /* check that we know this tag */
+       if (uboot_tag != UBOOT_TAG_NONE &&
+           uboot_tag != UBOOT_TAG_CMDLINE &&
+           uboot_tag != UBOOT_TAG_DTB) {
+               pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+               goto ignore_uboot_args;
+       }
+
+       if (uboot_tag != UBOOT_TAG_NONE &&
+            uboot_arg_invalid((unsigned long)uboot_arg)) {
+               pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+               goto ignore_uboot_args;
+       }
+
+       /* see if U-boot passed an external Device Tree blob */
+       if (uboot_tag == UBOOT_TAG_DTB) {
+               machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+               /* external Device Tree blob is invalid - use embedded one */
+               use_embedded_dtb = !machine_desc;
+       }
+
+       if (uboot_tag == UBOOT_TAG_CMDLINE)
+               append_cmdline = true;
+
+ignore_uboot_args:
+
+       if (use_embedded_dtb) {
                machine_desc = setup_machine_fdt(__dtb_start);
                if (!machine_desc)
                        panic("Embedded DT invalid\n");
+       }
 
-               /*
-                * If we are here, it is established that @uboot_arg didn't
-                * point to DT blob. Instead if u-boot says it is cmdline,
-                * append to embedded DT cmdline.
-                * setup_machine_fdt() would have populated @boot_command_line
-                */
-               if (uboot_tag == 1) {
-                       /* Ensure a whitespace between the 2 cmdlines */
-                       strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-                       strlcat(boot_command_line, uboot_arg,
-                               COMMAND_LINE_SIZE);
-               }
+       /*
+        * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+        * append processing can only happen after.
+        */
+       if (append_cmdline) {
+               /* Ensure a whitespace between the 2 cmdlines */
+               strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+               strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
        }
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+       handle_uboot_args();
 
        /* Save unparsed command line copy for /proc/cmdline */
        *cmdline_p = boot_command_line;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58e0e6620984468b2c5acc1fb2870b..ea14b0bf3116dfecb2655bd4a22816e05df13c4d 100644 (file)
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
 #endif
 
 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX)     prefetch    [RX, 56]
-# define PREFETCH_WRITE(RX)    prefetchw   [RX, 64]
 # define LOADX(DST,RX)         ldd.ab  DST, [RX, 8]
 # define STOREX(SRC,RX)                std.ab  SRC, [RX, 8]
 # define ZOLSHFT               5
 # define ZOLAND                        0x1F
 #else
-# define PREFETCH_READ(RX)     prefetch    [RX, 28]
-# define PREFETCH_WRITE(RX)    prefetchw   [RX, 32]
 # define LOADX(DST,RX)         ld.ab   DST, [RX, 4]
 # define STOREX(SRC,RX)                st.ab   SRC, [RX, 4]
 # define ZOLSHFT               4
@@ -41,8 +37,6 @@
 #endif
 
 ENTRY_CFI(memcpy)
-       prefetch [r1]           ; Prefetch the read location
-       prefetchw [r0]          ; Prefetch the write location
        mov.f   0, r2
 ;;; if size is zero
        jz.d    [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
        lpnz    @.Lcopy32_64bytes
        ;; LOOP START
        LOADX (r6, r1)
-       PREFETCH_READ (r1)
-       PREFETCH_WRITE (r3)
        LOADX (r8, r1)
        LOADX (r10, r1)
        LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
        lpnz    @.Lcopy8bytes_1
        ;; LOOP START
        ld.ab   r6, [r1, 4]
-       prefetch [r1, 28]       ;Prefetch the next read location
        ld.ab   r8, [r1,4]
-       prefetchw [r3, 32]      ;Prefetch the next write location
 
        SHIFT_1 (r7, r6, 24)
        or      r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
        lpnz    @.Lcopy8bytes_2
        ;; LOOP START
        ld.ab   r6, [r1, 4]
-       prefetch [r1, 28]       ;Prefetch the next read location
        ld.ab   r8, [r1,4]
-       prefetchw [r3, 32]      ;Prefetch the next write location
 
        SHIFT_1 (r7, r6, 16)
        or      r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
        lpnz    @.Lcopy8bytes_3
        ;; LOOP START
        ld.ab   r6, [r1, 4]
-       prefetch [r1, 28]       ;Prefetch the next read location
        ld.ab   r8, [r1,4]
-       prefetchw [r3, 32]      ;Prefetch the next write location
 
        SHIFT_1 (r7, r6, 8)
        or      r7, r7, r5
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index f25c085b9874c9c3c922e37f721109d769491f94..23e00216e5a52da6f53f200289049452e5134b47 100644 (file)
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK
        bool "ARC HS Development Kit SOC"
        depends on ISA_ARCV2
        select ARC_HAS_ACCL_REGS
+       select ARC_IRQ_NO_AUTOSAVE
        select CLK_HSDK
        select RESET_HSDK
        select HAVE_PCI
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 664e918e26249a6dd0a43fa1c80eaf187a040821..26524b75970a3ef52bf7609fefa3626f07afcb2a 100644 (file)
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1400,6 +1400,7 @@ config NR_CPUS
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
        depends on SMP
+       select GENERIC_IRQ_MIGRATION
        help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index b67f5fee146954e0812b8420bfb44f8aec6cadaa..dce5be5df97bd91abe3ff039e8befab58656124b 100644 (file)
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii-txid";
+       phy-mode = "rgmii-id";
 };
 
 &tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 172c0224e7f6c96df6a5b05649ddf2060bb02578..b128998097ce7180cb2a72291bb83ea2a19d0f52 100644 (file)
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii-txid";
+       phy-mode = "rgmii-id";
        dual_emac_res_vlan = <1>;
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii-txid";
+       phy-mode = "rgmii-id";
        dual_emac_res_vlan = <2>;
 };
 
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index f3ac7483afed0997ff527e8c8cef68c3a7307367..5d04dc68cf5795ef8e61533cbf0b0f8bb9dc0546 100644 (file)
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
                                status = "okay";
                        };
 
-                       nand@d0000 {
+                       nand-controller@d0000 {
                                status = "okay";
-                               label = "pxa3xx_nand-0";
-                               num-cs = <1>;
-                               marvell,nand-keep-config;
-                               nand-on-flash-bbt;
-
-                               partitions {
-                                       compatible = "fixed-partitions";
-                                       #address-cells = <1>;
-                                       #size-cells = <1>;
-
-                                       partition@0 {
-                                               label = "U-Boot";
-                                               reg = <0 0x800000>;
-                                       };
-                                       partition@800000 {
-                                               label = "Linux";
-                                               reg = <0x800000 0x800000>;
-                                       };
-                                       partition@1000000 {
-                                               label = "Filesystem";
-                                               reg = <0x1000000 0x3f000000>;
 
+                               nand@0 {
+                                       reg = <0>;
+                                       label = "pxa3xx_nand-0";
+                                       nand-rb = <0>;
+                                       nand-on-flash-bbt;
+
+                                       partitions {
+                                               compatible = "fixed-partitions";
+                                               #address-cells = <1>;
+                                               #size-cells = <1>;
+
+                                               partition@0 {
+                                                       label = "U-Boot";
+                                                       reg = <0 0x800000>;
+                                               };
+                                               partition@800000 {
+                                                       label = "Linux";
+                                                       reg = <0x800000 0x800000>;
+                                               };
+                                               partition@1000000 {
+                                                       label = "Filesystem";
+                                                       reg = <0x1000000 0x3f000000>;
+                                               };
                                        };
                                };
                        };
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 1139e9469a83792efc102ff2c8dd375d1c5591b3..b4cca507cf1361b6c534a68f1961b660ef0fece8 100644 (file)
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
                                status = "okay";
                        };
 
-                       nand@d0000 {
+                       nand-controller@d0000 {
                                status = "okay";
-                               label = "pxa3xx_nand-0";
-                               num-cs = <1>;
-                               marvell,nand-keep-config;
-                               nand-on-flash-bbt;
+
+                               nand@0 {
+                                       reg = <0>;
+                                       label = "pxa3xx_nand-0";
+                                       nand-rb = <0>;
+                                       nand-on-flash-bbt;
+                               };
                        };
                };
 
index bbbb38888bb89db8ef5b1814873b7285341e7590..87dcb502f72da5fdab843d38516f8fc85d91387b 100644 (file)
 
                        };
 
-                       nand@d0000 {
+                       nand-controller@d0000 {
                                status = "okay";
-                               label = "pxa3xx_nand-0";
-                               num-cs = <1>;
-                               marvell,nand-keep-config;
-                               nand-on-flash-bbt;
-
-                               partitions {
-                                       compatible = "fixed-partitions";
-                                       #address-cells = <1>;
-                                       #size-cells = <1>;
-
-                                       partition@0 {
-                                               label = "u-boot";
-                                               reg = <0x00000000 0x000e0000>;
-                                               read-only;
-                                       };
-
-                                       partition@e0000 {
-                                               label = "u-boot-env";
-                                               reg = <0x000e0000 0x00020000>;
-                                               read-only;
-                                       };
-
-                                       partition@100000 {
-                                               label = "u-boot-env2";
-                                               reg = <0x00100000 0x00020000>;
-                                               read-only;
-                                       };
-
-                                       partition@120000 {
-                                               label = "zImage";
-                                               reg = <0x00120000 0x00400000>;
-                                       };
-
-                                       partition@520000 {
-                                               label = "initrd";
-                                               reg = <0x00520000 0x00400000>;
-                                       };
 
-                                       partition@e00000 {
-                                               label = "boot";
-                                               reg = <0x00e00000 0x3f200000>;
+                               nand@0 {
+                                       reg = <0>;
+                                       label = "pxa3xx_nand-0";
+                                       nand-rb = <0>;
+                                       nand-on-flash-bbt;
+
+                                       partitions {
+                                               compatible = "fixed-partitions";
+                                               #address-cells = <1>;
+                                               #size-cells = <1>;
+
+                                               partition@0 {
+                                                       label = "u-boot";
+                                                       reg = <0x00000000 0x000e0000>;
+                                                       read-only;
+                                               };
+
+                                               partition@e0000 {
+                                                       label = "u-boot-env";
+                                                       reg = <0x000e0000 0x00020000>;
+                                                       read-only;
+                                               };
+
+                                               partition@100000 {
+                                                       label = "u-boot-env2";
+                                                       reg = <0x00100000 0x00020000>;
+                                                       read-only;
+                                               };
+
+                                               partition@120000 {
+                                                       label = "zImage";
+                                                       reg = <0x00120000 0x00400000>;
+                                               };
+
+                                               partition@520000 {
+                                                       label = "initrd";
+                                                       reg = <0x00520000 0x00400000>;
+                                               };
+
+                                               partition@e00000 {
+                                                       label = "boot";
+                                                       reg = <0x00e00000 0x3f200000>;
+                                               };
                                        };
                                };
                        };
index cc0c3cf89eaad5842f26c3d42bf1b71b64c567bd..592111c8d6fdfcd299a316b2bf02d757c6f40cef 100644 (file)
                };
 
                display-controller@6a000000 {
-                       status = "disabled";
+                       status = "okay";
 
                        port@0 {
                                reg = <0>;
index d5f11d6d987ea52236b113b49e761b329c948e08..bc85b6a166c79e9b079d184e4ddac0d3e26d7067 100644 (file)
                stdout-path = "serial0:115200n8";
        };
 
-       memory@80000000 {
+       /*
+        * Note that recent versions of the device tree compiler (starting with
+        * version 1.4.2) warn about this node containing a reg property but
+        * missing a unit-address. However, the bootloader on these Chromebook
+        * devices relies on the full name of this node being exactly /memory.
+        * Adding the unit-address causes the bootloader to create a /memory
+        * node and write the memory bank configuration to that node, which in
+        * turn leads the kernel to believe that the device has 2 GiB of
+        * memory instead of the amount detected by the bootloader.
+        *
+        * The name of this node is effectively ABI and must not be changed.
+        */
+       memory {
+               device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x80000000>;
        };
 
+       /delete-node/ memory@80000000;
+
        host1x@50000000 {
                hdmi@54280000 {
                        status = "okay";
index b9ec44060ed313dac5c469e4e6e39e905d0206cc..a03cf4dfb7818d1275ee0d06ecf633b149adf1cd 100644 (file)
@@ -212,10 +212,11 @@ K256:
 .global        sha256_block_data_order
 .type  sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
        sub     r3,pc,#8                @ sha256_block_data_order
 #else
-       adr     r3,sha256_block_data_order
+       adr     r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
index 3b58300d611cf4c94f9014901d0a3204acb75f13..054aae0edfce5628715d607fd7d194ac5cc81938 100644 (file)
@@ -93,10 +93,11 @@ K256:
 .global        sha256_block_data_order
 .type  sha256_block_data_order,%function
 sha256_block_data_order:
+.Lsha256_block_data_order:
 #if __ARM_ARCH__<7
        sub     r3,pc,#8                @ sha256_block_data_order
 #else
-       adr     r3,sha256_block_data_order
+       adr     r3,.Lsha256_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
index fb5d15048c0b2d2ea2d8c8c9a2bdbb15a6c14052..788c17b56ecceb5e607382539faec13b8fbc2886 100644 (file)
@@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global        sha512_block_data_order
 .type  sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
        sub     r3,pc,#8                @ sha512_block_data_order
 #else
-       adr     r3,sha512_block_data_order
+       adr     r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
index b1c334a49cdaa61f019b7367bfa4e20d4ac4a078..710ea309769e71628d1d4a166c834ad92033b658 100644 (file)
@@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
 .global        sha512_block_data_order
 .type  sha512_block_data_order,%function
 sha512_block_data_order:
+.Lsha512_block_data_order:
 #if __ARM_ARCH__<7
        sub     r3,pc,#8                @ sha512_block_data_order
 #else
-       adr     r3,sha512_block_data_order
+       adr     r3,.Lsha512_block_data_order
 #endif
 #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
        ldr     r12,.LOPENSSL_armcap
index c883fcbe93b67ef68bfc18a6e48d4ec53c37cdd0..46d41140df27dd9c4f15c713189db2199c1352a2 100644 (file)
@@ -25,7 +25,6 @@
 #ifndef __ASSEMBLY__
 struct irqaction;
 struct pt_regs;
-extern void migrate_irqs(void);
 
 extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
index 9908dacf9229fbfa694ceebdfb2ed1b534c3f522..844861368cd5c236a113adaeab8a26d43d8ac419 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
        return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-       struct irq_data *d = irq_desc_get_irq_data(desc);
-       const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-       struct irq_chip *c;
-       bool ret = false;
-
-       /*
-        * If this is a per-CPU interrupt, or the affinity does not
-        * include this CPU, then we have nothing to do.
-        */
-       if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-               return false;
-
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
-               ret = true;
-       }
-
-       c = irq_data_get_irq_chip(d);
-       if (!c->irq_set_affinity)
-               pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-               cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-       return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-       unsigned int i;
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       for_each_irq_desc(i, desc) {
-               bool affinity_broken;
-
-               raw_spin_lock(&desc->lock);
-               affinity_broken = migrate_one_irq(desc);
-               raw_spin_unlock(&desc->lock);
-
-               if (affinity_broken)
-                       pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-                               i, smp_processor_id());
-       }
-
-       local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
index 3bf82232b1bed4bce829749ce6af885bbc43c191..1d6f5ea522f49184c53a7d996769104107b4de8e 100644 (file)
@@ -254,7 +254,7 @@ int __cpu_disable(void)
        /*
         * OK - migrate IRQs away from this CPU
         */
-       migrate_irqs();
+       irq_migrate_all_off_this_cpu();
 
        /*
         * Flush user cache and TLB mappings, and then remove this CPU
index f1e2922e447cd07a6acca27123d3254f4fd10ba8..1e3e08a1c45677e66017cb6479049658db90dbde 100644 (file)
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev)
                return;
 
        arm_teardown_iommu_dma_ops(dev);
+       /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+       set_dma_ops(dev, NULL);
 }
index 2c118a6ab358736e8227214b081fce343b48b29f..0dc23fc227ed2745215eeda46965dcac1524281b 100644 (file)
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
        }
 
        /* Copy arch-dep-instance from template. */
-       memcpy(code, (unsigned char *)optprobe_template_entry,
+       memcpy(code, (unsigned long *)&optprobe_template_entry,
                        TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
        /* Adjust buffer according to instruction. */
index 5b4a9609e31f62a00354ae670b15e586e664ecc7..2468762283a5c295213ef797407fdae4135c1dab 100644 (file)
                reg = <0>;
                pinctrl-names = "default";
                pinctrl-0 = <&cp0_copper_eth_phy_reset>;
-               reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>;
+               reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>;
                reset-assert-us = <10000>;
        };
 
index 8d41b69ec2dab077eb535c26386d385392fca339..99bccaac31ad8783da3f19c11ca2af7a9355c317 100644 (file)
@@ -37,7 +37,7 @@
                };
 
                memory@86200000 {
-                       reg = <0x0 0x86200000 0x0 0x2600000>;
+                       reg = <0x0 0x86200000 0x0 0x2d00000>;
                        no-map;
                };
 
index 021bb9e9784b271cb670f8c973312e9e2ec14ba4..706c4e10e9e294c7c5de49dbbe7a784ec7ca1458 100644 (file)
@@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon)
        mov             w3, w2
        bl              chacha_permute
 
-       st1             {v0.16b}, [x1], #16
-       st1             {v3.16b}, [x1]
+       st1             {v0.4s}, [x1], #16
+       st1             {v3.4s}, [x1]
 
        ldp             x29, x30, [sp], #16
        ret
@@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon)
        add             v3.4s, v3.4s, v19.4s
          add           a2, a2, w8
          add           a3, a3, w9
+CPU_BE(          rev           a0, a0          )
+CPU_BE(          rev           a1, a1          )
+CPU_BE(          rev           a2, a2          )
+CPU_BE(          rev           a3, a3          )
 
        ld4r            {v24.4s-v27.4s}, [x0], #16
        ld4r            {v28.4s-v31.4s}, [x0]
@@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon)
        add             v7.4s, v7.4s, v23.4s
          add           a6, a6, w8
          add           a7, a7, w9
+CPU_BE(          rev           a4, a4          )
+CPU_BE(          rev           a5, a5          )
+CPU_BE(          rev           a6, a6          )
+CPU_BE(          rev           a7, a7          )
 
        // x8[0-3] += s2[0]
        // x9[0-3] += s2[1]
@@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon)
        add             v11.4s, v11.4s, v27.4s
          add           a10, a10, w8
          add           a11, a11, w9
+CPU_BE(          rev           a8, a8          )
+CPU_BE(          rev           a9, a9          )
+CPU_BE(          rev           a10, a10        )
+CPU_BE(          rev           a11, a11        )
 
        // x12[0-3] += s3[0]
        // x13[0-3] += s3[1]
@@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon)
        add             v15.4s, v15.4s, v31.4s
          add           a14, a14, w8
          add           a15, a15, w9
+CPU_BE(          rev           a12, a12        )
+CPU_BE(          rev           a13, a13        )
+CPU_BE(          rev           a14, a14        )
+CPU_BE(          rev           a15, a15        )
 
        // interleave 32-bit words in state n, n+1
          ldp           w6, w7, [x2], #64
index 2ba6c6b9541f3a3f7b09de437b1eff60b56a511f..71abfc7612b2ff59f3abde8064e56ebf2434cbd2 100644 (file)
@@ -36,4 +36,8 @@
 #include <arm_neon.h>
 #endif
 
+#ifdef CONFIG_CC_IS_CLANG
+#pragma clang diagnostic ignored "-Wincompatible-pointer-types"
+#endif
+
 #endif /* __ASM_NEON_INTRINSICS_H */
index 15d79a8e5e5e414350b9f869dad48776767c762b..eecf7927dab08bf66176841004f7e39bc7605c47 100644 (file)
@@ -539,8 +539,7 @@ set_hcr:
        /* GICv3 system register access */
        mrs     x0, id_aa64pfr0_el1
        ubfx    x0, x0, #24, #4
-       cmp     x0, #1
-       b.ne    3f
+       cbz     x0, 3f
 
        mrs_s   x0, SYS_ICC_SRE_EL2
        orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
index 9dce33b0e26042b4d70e8a1ae35c0b1f39195e05..ddaea0fd2fa4bba34bd79988f6fcd72470e6e537 100644 (file)
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
 }
 
 /*
- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
- * We also take into account DIT (bit 24), which is not yet documented, and
- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
- * allocated an EL0 meaning in future.
+ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
+ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
+ * not described in ARM DDI 0487D.a.
+ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
+ * be allocated an EL0 meaning in future.
  * Userspace cannot use these until they have an architectural meaning.
  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
  * We also reserve IL for the kernel; SS is handled dynamically.
  */
 #define SPSR_EL1_AARCH64_RES0_BITS \
-       (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
-        GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
+       (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+        GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
 #define SPSR_EL1_AARCH32_RES0_BITS \
-       (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
+       (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
 
 static int valid_compat_regs(struct user_pt_regs *regs)
 {
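
A minimal userspace sketch of what the tightened masks above change (the GENMASK_ULL below is a simplified stand-in for the kernel macro, not its real definition): splitting GENMASK_ULL(20, 10) into GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) carves exactly bit 12 (SSBS) out of the AArch64 RES0 set.

	#include <stdio.h>
	#include <stdint.h>

	/* Simplified stand-in for the kernel's GENMASK_ULL(h, l):
	 * sets bits l..h inclusive. */
	#define GENMASK_ULL(h, l) \
		((~0ULL << (l)) & (~0ULL >> (63 - (h))))

	int main(void)
	{
		uint64_t old_mask = GENMASK_ULL(20, 10);
		uint64_t new_mask = GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10);

		/* Prints 0x1000: only bit 12 (SSBS) left the RES0 set. */
		printf("freed bits: %#llx\n",
		       (unsigned long long)(old_mask ^ new_mask));
		return 0;
	}
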
index d09ec76f08cfcee42e9fcc4a12e8a1760e8ac7bb..0098493282898457c5963fefc16d69c5946b45e8 100644 (file)
@@ -339,6 +339,9 @@ void __init setup_arch(char **cmdline_p)
        smp_init_cpus();
        smp_build_mpidr_hash();
 
+       /* Init percpu seeds for random tags after cpus are set up. */
+       kasan_init_tags();
+
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Make sure init_thread_info.ttbr0 always generates translation
index 4b55b15707a33e5a5c7c202977845f86e847bda5..f37a86d2a69da5d8a93a53f43b5a9d075c6ba2a4 100644 (file)
@@ -252,8 +252,6 @@ void __init kasan_init(void)
        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
-       kasan_init_tags();
-
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
index 07b4c65a88a43708467626b410baf79936bac203..8e73d65f348064792e5a1515ddcd7809b12da2c8 100644 (file)
@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
 
 static int shared_device_registered;
 
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
 static struct resource enet0_res[] = {
        {
                .start          = -1, /* filled at runtime */
@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
        .resource       = enet0_res,
        .dev            = {
                .platform_data = &enet0_pd,
+               .dma_mask = &enet_dmamask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
        .resource       = enet1_res,
        .dev            = {
                .platform_data = &enet1_pd,
+               .dma_mask = &enet_dmamask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
        .resource       = enetsw_res,
        .dev            = {
                .platform_data = &enetsw_pd,
+               .dma_mask = &enet_dmamask,
+               .coherent_dma_mask = DMA_BIT_MASK(32),
        },
 };
 
index 0b9535bc2c53d0c450a0c02beb24691d356614e2..6b2a4a902a981c7365cb05abc94744cfc6648741 100644 (file)
@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                              unsigned long new, unsigned int size)
 {
-       u32 mask, old32, new32, load32;
+       u32 mask, old32, new32, load32, load;
        volatile u32 *ptr32;
        unsigned int shift;
-       u8 load;
 
        /* Check that ptr is naturally aligned */
        WARN_ON((unsigned long)ptr & (size - 1));
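
The type change above matters because the value reloaded from the containing word is a halfword for a 16-bit cmpxchg; a u8 temporary silently truncates it, so the comparison against the expected old value can never match. A hedged plain-C sketch (illustrative values, not the kernel helper itself):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t word = 0xabcd0000;	/* 16-bit value 0xabcd at bits 16..31 */
		uint32_t mask = 0xffff;
		unsigned int shift = 16;
		uint32_t old = 0xabcd;		/* expected old value */

		uint8_t  load_u8  = (word >> shift) & mask;	/* truncated to 0xcd */
		uint32_t load_u32 = (word >> shift) & mask;	/* 0xabcd */

		printf("u8:  %#x == old? %d\n", load_u8, load_u8 == old);	/* 0 */
		printf("u32: %#x == old? %d\n", load_u32, load_u32 == old);	/* 1 */
		return 0;
	}
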
index 8c6c48ed786a1527c22ba5b46bcdad70029e5865..d2e5a5ad0e6f5e4b90e5f1a20c7942f40a1c735d 100644 (file)
@@ -384,7 +384,8 @@ static void __init bootmem_init(void)
        init_initrd();
        reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
-       memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+       memblock_reserve(PHYS_OFFSET,
+                        (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
 
        /*
         * max_low_pfn is not a number of pages. The number of pages
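
The fix above reflects that memblock_reserve() takes (base, size), not (base, end): passing the end address as a size over-reserves by PHYS_OFFSET bytes whenever RAM does not start at 0. A small sketch with a stand-in reserve() and illustrative addresses:

	#include <stdio.h>

	/* Stand-in with memblock_reserve()'s (base, size) convention. */
	static void reserve(unsigned long base, unsigned long size)
	{
		printf("reserve [%#lx, %#lx)\n", base, base + size);
	}

	int main(void)
	{
		unsigned long phys_offset  = 0x20000000;	/* RAM base (illustrative) */
		unsigned long reserved_end = 0x20400000;	/* end of kernel image */

		/* Passing the end address where a size is expected: */
		reserve(phys_offset, reserved_end);			/* [.., 0x40400000): wrong */
		reserve(phys_offset, reserved_end - phys_offset);	/* [.., 0x20400000): right */
		return 0;
	}
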
index 577ec81b557dcfa2d4805ed39cbcaffb1f7052d1..3deab9a777185a634b095ebb2c8297f32fbaf90c 100644 (file)
@@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev)
        dma_addr_t dma;
 
        cp1_base =
-               (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
-                                                   &dma, GFP_ATOMIC));
+               (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+                                                   &dma, GFP_KERNEL));
 
        gpio_count = of_gpio_count(pdev->dev.of_node);
        while (gpio_count > 0) {
index b16710a8a9e7a2ee7d4372fa81635a4c7fdc79a9..0effd3cba9a731907920c6f47a1b52321ac7225f 100644 (file)
@@ -79,8 +79,6 @@ enum reg_val_type {
        REG_64BIT_32BIT,
        /* 32-bit compatible, need truncation for 64-bit ops. */
        REG_32BIT,
-       /* 32-bit zero extended. */
-       REG_32BIT_ZERO_EX,
        /* 32-bit no sign/zero extension needed. */
        REG_32BIT_POS
 };
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
        const struct bpf_prog *prog = ctx->skf;
        int stack_adjust = ctx->stack_size;
        int store_offset = stack_adjust - 8;
+       enum reg_val_type td;
        int r0 = MIPS_R_V0;
 
-       if (dest_reg == MIPS_R_RA &&
-           get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+       if (dest_reg == MIPS_R_RA) {
                /* Don't let zero extended value escape. */
-               emit_instr(ctx, sll, r0, r0, 0);
+               td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+               if (td == REG_64BIT)
+                       emit_instr(ctx, sll, r0, r0, 0);
+       }
 
        if (ctx->flags & EBPF_SAVE_RA) {
                emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (dst < 0)
                        return dst;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+               if (td == REG_64BIT)
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                if (insn->imm == 1) {
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
                if (src < 0 || dst < 0)
                        return -EINVAL;
                td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
-               if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+               if (td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
                did_move = false;
                ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
-               if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+               if (ts == REG_64BIT) {
                        int tmp_reg = MIPS_R_AT;
 
                        if (bpf_op == BPF_MOV) {
@@ -1254,8 +1255,7 @@ jeq_common:
                if (insn->imm == 64 && td == REG_32BIT)
                        emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
 
-               if (insn->imm != 64 &&
-                   (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+               if (insn->imm != 64 && td == REG_64BIT) {
                        /* sign extend */
                        emit_instr(ctx, sll, dst, dst, 0);
                }
@@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
        /* Update the icache */
        flush_icache_range((unsigned long)ctx.target,
-                          (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+                          (unsigned long)&ctx.target[ctx.idx]);
 
        if (bpf_jit_enable > 1)
                /* Dump JIT code */
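
The flush-range change above is the usual pointer-scaling pitfall: C pointer arithmetic already scales by the element size, so adding idx * sizeof(u32) to a u32 pointer lands four times too far. A minimal sketch (illustrative buffer, not the JIT context):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	int main(void)
	{
		uint32_t buf[64];
		uint32_t *target = buf;
		size_t idx = 8;		/* number of emitted 32-bit instructions */

		/* Scaled twice: 8 * 4 elements past the start. */
		printf("scaled twice: %td bytes\n",
		       (char *)(target + idx * sizeof(uint32_t)) - (char *)buf);	/* 128 */
		/* Scaled once, as the fixed code does with &ctx.target[ctx.idx]. */
		printf("&target[idx]: %td bytes\n",
		       (char *)&target[idx] - (char *)buf);				/* 32 */
		return 0;
	}
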
index 2582df1c529bbcbb00262bba3f4e9534439766df..0964c236e3e5a711056e058a8a5ee63343f9a496 100644 (file)
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-       if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-           tracehook_report_syscall_entry(regs)) {
+       if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+               int rc = tracehook_report_syscall_entry(regs);
+
                /*
-                * Tracing decided this syscall should not happen or the
-                * debugger stored an invalid system call number. Skip
-                * the system call and the system call restart handling.
+                * As tracesys_next does not set %r28 to -ENOSYS
+                * when %r20 is set to -1, initialize it here.
                 */
-               regs->gr[20] = -1UL;
-               goto out;
+               regs->gr[28] = -ENOSYS;
+
+               if (rc) {
+                       /*
+                        * A nonzero return code from
+                        * tracehook_report_syscall_entry() tells us
+                        * to prevent the syscall execution.  Skip
+                        * the syscall and the syscall restart handling.
+                        *
+                        * Note that the tracer may also just change
+                        * regs->gr[20] to an invalid syscall number,
+                        * which is handled by tracesys_next.
+                        */
+                       regs->gr[20] = -1UL;
+                       return -1;
+               }
        }
 
        /* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
                        regs->gr[24] & 0xffffffff,
                        regs->gr[23] & 0xffffffff);
 
-out:
        /*
         * Sign extend the syscall number to 64bit since it may have been
         * modified by a compat ptrace call
index 7db3119f8a5b33404a5b88d5eb035f9836a94d18..145373f0e5dc082ecd34f062143b88c82c22b24c 100644 (file)
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 
                pnv_pci_ioda2_setup_dma_pe(phb, pe);
 #ifdef CONFIG_IOMMU_API
+               iommu_register_group(&pe->table_group,
+                               pe->phb->hose->global_number, pe->pe_number);
                pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
 #endif
        }
index 45fb70b4bfa7ded84f50fcae3a050da5335de07d..ef9448a907c63037d1da229f1cebc91573c0e575 100644 (file)
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
                        return 0;
 
                pe = &phb->ioda.pe_array[pdn->pe_number];
+               if (!pe->table_group.group)
+                       return 0;
                iommu_add_device(&pe->table_group, dev);
                return 0;
        case BUS_NOTIFY_DEL_DEVICE:
index a153257bf7d9877a10ea0f857b8985bb798f3003..d62fa148558b99dccab51d3cdf8ea3cef06e31fa 100644 (file)
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        scb_s->crycbd = 0;
 
        apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
-       if (!apie_h && !key_msk)
+       if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
                return 0;
 
        if (!crycb_addr)
index 01d0f7fb14cce7b8e0afc17798d6cc8a1d2938d1..2563d1e532e22024d4c496dc6505a8cca731ff67 100644 (file)
@@ -1,3 +1,3 @@
 ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
+obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
 endif
index 705dafc2d11ab5bb9ddf3b249e0a69dae31a966e..2bdbbbcfa393fd6b2df936029917d20dc4ae8e84 100644 (file)
@@ -841,7 +841,7 @@ union hv_gpa_page_range {
  * count is equal to how many entries of union hv_gpa_page_range can
  * be populated into the input parameter page.
  */
-#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) /  \
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) /        \
                                sizeof(union hv_gpa_page_range))
 
 struct hv_guest_mapping_flush_list {
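
A minimal sketch of the precedence bug the hunk above fixes (assumed sizes, not the real TLFS definitions): without the outer parentheses, only 2 * sizeof(u64) is divided by the entry size, yielding a bogus count.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096u
	typedef uint64_t u64;
	struct entry { u64 a, b; };	/* stand-in for union hv_gpa_page_range */

	#define BAD  (PAGE_SIZE - 2 * sizeof(u64) /  sizeof(struct entry))
	#define GOOD ((PAGE_SIZE - 2 * sizeof(u64)) / sizeof(struct entry))

	int main(void)
	{
		printf("bad:  %zu\n", (size_t)BAD);	/* 4095: division bound to the wrong term */
		printf("good: %zu\n", (size_t)GOOD);	/* 255: entries that actually fit */
		return 0;
	}
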
index 4660ce90de7ffe0d18af6af9eeb342d2feb55c7e..180373360e34256ef3a3bba30ada81f7df9d27ed 100644 (file)
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
                unsigned int cr4_smap:1;
                unsigned int cr4_smep:1;
                unsigned int cr4_la57:1;
+               unsigned int maxphyaddr:6;
        };
 };
 
@@ -397,6 +398,7 @@ struct kvm_mmu {
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
+       gpa_t root_cr3;
        union kvm_mmu_role mmu_role;
        u8 root_level;
        u8 shadow_root_level;
index 780f2b42c8efe76b5d78291d4b633dd26b6e5c64..c1334aaaa78d32dbab9a38b45deb51d3b1487576 100644 (file)
@@ -284,7 +284,7 @@ do {                                                                        \
                __put_user_goto(x, ptr, "l", "k", "ir", label);         \
                break;                                                  \
        case 8:                                                         \
-               __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \
+               __put_user_goto_u64(x, ptr, label);                     \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
@@ -431,8 +431,10 @@ do {                                                                       \
 ({                                                             \
        __label__ __pu_label;                                   \
        int __pu_err = -EFAULT;                                 \
+       __typeof__(*(ptr)) __pu_val;                            \
+       __pu_val = x;                                           \
        __uaccess_begin();                                      \
-       __put_user_size((x), (ptr), (size), __pu_label);        \
+       __put_user_size(__pu_val, (ptr), (size), __pu_label);   \
        __pu_err = 0;                                           \
 __pu_label:                                                    \
        __uaccess_end();                                        \
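
The __pu_val temporary added above is the classic single-evaluation pattern for function-like macros: the argument is captured once in a variable of the pointee's type, so side effects happen exactly once and the conversion is applied exactly once. A hedged sketch with a hypothetical STORE_ONCE macro (GNU C __typeof__):

	#include <stdio.h>

	#define STORE_ONCE(x, ptr) do {				\
		__typeof__(*(ptr)) __val = (x);	/* evaluate x exactly once */ \
		*(ptr) = __val;					\
	} while (0)

	int main(void)
	{
		int dst = -1, n = 0;

		STORE_ONCE(n++, &dst);	/* side effect happens exactly once */
		printf("dst=%d n=%d\n", dst, n);	/* dst=0 n=1 */
		return 0;
	}
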
index bbffa6c54697acc95c8da1cdc939e0f0a490bbcf..c07958b59f5051d525ddf3e110412f9377ac5e96 100644 (file)
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
        unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
        unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
+       unsigned f_la57 = 0;
 
        /* cpuid 1.edx */
        const u32 kvm_cpuid_1_edx_x86_features =
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        // TSC_ADJUST is emulated
                        entry->ebx |= F(TSC_ADJUST);
                        entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+                       f_la57 = entry->ecx & F(LA57);
                        cpuid_mask(&entry->ecx, CPUID_7_ECX);
+                       /* Set LA57 based on hardware capability. */
+                       entry->ecx |= f_la57;
                        entry->ecx |= f_umip;
                        /* PKU is not yet implemented for shadow paging. */
                        if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
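
The f_la57 dance above is a save/mask/restore of a single capability bit: sample it before the mask would clear it, then OR it back. A sketch with illustrative bit positions (not the real CPUID layout):

	#include <stdio.h>
	#include <stdint.h>

	#define F_LA57 (1u << 16)	/* illustrative bit position */

	int main(void)
	{
		uint32_t ecx  = F_LA57 | (1u << 3);	/* hardware-reported features */
		uint32_t mask = (1u << 3);		/* supported-feature mask lacking LA57 */

		uint32_t f_la57 = ecx & F_LA57;	/* sample before masking */
		ecx &= mask;			/* masking alone would drop LA57 */
		ecx |= f_la57;			/* restore the hardware bit */

		printf("LA57 preserved: %d\n", !!(ecx & F_LA57));	/* 1 */
		return 0;
	}
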
index da9c42349b1f800ec06ad3441be7ade3f852248f..f2d1d230d5b8421827aa447984fb271360bfec00 100644 (file)
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                                           &invalid_list);
                        mmu->root_hpa = INVALID_PAGE;
                }
+               mmu->root_cr3 = 0;
        }
 
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
        } else
                BUG();
+       vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
 
        return 0;
 }
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *sp;
        u64 pdptr, pm_mask;
-       gfn_t root_gfn;
+       gfn_t root_gfn, root_cr3;
        int i;
 
-       root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
+       root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+       root_gfn = root_cr3 >> PAGE_SHIFT;
 
        if (mmu_check_root(vcpu, root_gfn))
                return 1;
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                ++sp->root_count;
                spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu->root_hpa = root;
-               return 0;
+               goto set_root_cr3;
        }
 
        /*
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
        }
 
+set_root_cr3:
+       vcpu->arch.mmu->root_cr3 = root_cr3;
+
        return 0;
 }
 
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
        struct kvm_mmu_root_info root;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
 
-       root.cr3 = mmu->get_cr3(vcpu);
+       root.cr3 = mmu->root_cr3;
        root.hpa = mmu->root_hpa;
 
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
        }
 
        mmu->root_hpa = root.hpa;
+       mmu->root_cr3 = root.cr3;
 
        return i < KVM_MMU_NUM_PREV_ROOTS;
 }
@@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
        ext.cr4_pse = !!is_pse(vcpu);
        ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
        ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+       ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
 
        ext.valid = 1;
 
@@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
 
        vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.root_mmu.root_cr3 = 0;
        vcpu->arch.root_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
 
        vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+       vcpu->arch.guest_mmu.root_cr3 = 0;
        vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
index 6521134057e8f9ef34dbfaf8a0a4e46672a32d3c..856fa409c536408bf6b92039f3c88eeaa8ae53e6 100644 (file)
@@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
 }
 EXPORT_SYMBOL_GPL(ex_handler_fprestore);
 
-/* Helper to check whether a uaccess fault indicates a kernel bug. */
-static bool bogus_uaccess(struct pt_regs *regs, int trapnr,
-                         unsigned long fault_addr)
-{
-       /* This is the normal case: #PF with a fault address in userspace. */
-       if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX)
-               return false;
-
-       /*
-        * This code can be reached for machine checks, but only if the #MC
-        * handler has already decided that it looks like a candidate for fixup.
-        * This e.g. happens when attempting to access userspace memory which
-        * the CPU can't access because of uncorrectable bad memory.
-        */
-       if (trapnr == X86_TRAP_MC)
-               return false;
-
-       /*
-        * There are two remaining exception types we might encounter here:
-        *  - #PF for faulting accesses to kernel addresses
-        *  - #GP for faulting accesses to noncanonical addresses
-        * Complain about anything else.
-        */
-       if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) {
-               WARN(1, "unexpected trap %d in uaccess\n", trapnr);
-               return false;
-       }
-
-       /*
-        * This is a faulting memory access in kernel space, on a kernel
-        * address, in a usercopy function. This can e.g. be caused by improper
-        * use of helpers like __put_user and by improper attempts to access
-        * userspace addresses in KERNEL_DS regions.
-        * The one (semi-)legitimate exception are probe_kernel_{read,write}(),
-        * which can be invoked from places like kgdb, /dev/mem (for reading)
-        * and privileged BPF code (for reading).
-        * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag
-        * to tell us that faulting on kernel addresses, and even noncanonical
-        * addresses, in a userspace accessor does not necessarily imply a
-        * kernel bug, root might just be doing weird stuff.
-        */
-       if (current->kernel_uaccess_faults_ok)
-               return false;
-
-       /* This is bad. Refuse the fixup so that we go into die(). */
-       if (trapnr == X86_TRAP_PF) {
-               pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n",
-                        fault_addr);
-       } else {
-               pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n");
-       }
-       return true;
-}
-
 __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
                                  struct pt_regs *regs, int trapnr,
                                  unsigned long error_code,
                                  unsigned long fault_addr)
 {
-       if (bogus_uaccess(regs, trapnr, fault_addr))
-               return false;
        regs->ip = ex_fixup_addr(fixup);
        return true;
 }
@@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup,
                              unsigned long error_code,
                              unsigned long fault_addr)
 {
-       if (bogus_uaccess(regs, trapnr, fault_addr))
-               return false;
        /* Special hack for uaccess_err */
        current->thread.uaccess_err = 1;
        regs->ip = ex_fixup_addr(fixup);
index 17eb09d222ff4e865ccd3c8766d54aad5bfaf34f..ec78a04eb136e8bfd31e9ce3ab2d1b0a7ed29be1 100644 (file)
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
 
 int af_alg_release(struct socket *sock)
 {
-       if (sock->sk)
+       if (sock->sk) {
                sock_put(sock->sk);
+               sock->sk = NULL;
+       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(af_alg_release);
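
Clearing sock->sk after the put is the NULL-after-put idiom: a second trip through the release path becomes a harmless no-op instead of a double put. A self-contained sketch with simplified refcounting (not the real socket API):

	#include <stdio.h>

	struct sock { int refcnt; };
	struct socket { struct sock *sk; };

	static void sock_put(struct sock *sk)
	{
		if (--sk->refcnt == 0)
			printf("sock freed\n");
	}

	static int release(struct socket *sock)
	{
		if (sock->sk) {
			sock_put(sock->sk);
			sock->sk = NULL;	/* a repeated release() now does nothing */
		}
		return 0;
	}

	int main(void)
	{
		struct sock sk = { .refcnt = 1 };
		struct socket s = { .sk = &sk };

		release(&s);
		release(&s);	/* safe: no double put */
		return 0;
	}
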
index 0ea2139c50d875d0fdce8e9fb55374fd6bdc337b..ccd296dbb95c4a406b2047876a63ca812ed082f6 100644 (file)
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
 static void pm_runtime_deactivate_timer(struct device *dev)
 {
        if (dev->power.timer_expires > 0) {
-               hrtimer_cancel(&dev->power.suspend_timer);
+               hrtimer_try_to_cancel(&dev->power.suspend_timer);
                dev->power.timer_expires = 0;
        }
 }
index 2fe225a697df8be805ce90e739f2e85208bf071e..3487e03d4bc61c277fe8f44faeececa326e21876 100644 (file)
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
                return;
 
        at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
-                                          nck(at91sam9x5_systemck),
-                                          nck(at91sam9x35_periphck), 0);
+                                          nck(at91sam9x5_systemck), 31, 0);
        if (!at91sam9x5_pmc)
                return;
 
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
        parent_names[1] = "mainck";
        parent_names[2] = "plladivck";
        parent_names[3] = "utmick";
-       parent_names[4] = "mck";
+       parent_names[4] = "masterck";
        for (i = 0; i < 2; i++) {
                char name[6];
 
index d69ad96fe988b5bcada0ce937141829c2c685ead..cd0ef7274fdbf1ddab7f167724aa7868a5b92ae0 100644 (file)
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
        parent_names[1] = "mainck";
        parent_names[2] = "plladivck";
        parent_names[3] = "utmick";
-       parent_names[4] = "mck";
+       parent_names[4] = "masterck";
        for (i = 0; i < 3; i++) {
                char name[6];
 
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
        parent_names[1] = "mainck";
        parent_names[2] = "plladivck";
        parent_names[3] = "utmick";
-       parent_names[4] = "mck";
+       parent_names[4] = "masterck";
        parent_names[5] = "audiopll_pmcck";
        for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
                hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
index e358be7f6c8d5e40cfaa5cca562fbab813f2d124..b645a9d59cdbd61aa6358072ba5783265f873da7 100644 (file)
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
        parent_names[1] = "mainck";
        parent_names[2] = "plladivck";
        parent_names[3] = "utmick";
-       parent_names[4] = "mck";
+       parent_names[4] = "masterck";
        for (i = 0; i < 3; i++) {
                char name[6];
 
index 3b97f60540ad8cd29aeb06a69620dcf940a15213..609970c0b6665caa2e1a8babedffe2c5c771588a 100644 (file)
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk,        "ahb1-mmc1",    "ahb1",
 static SUNXI_CCU_GATE(ahb1_mmc2_clk,   "ahb1-mmc2",    "ahb1",
                      0x060, BIT(10), 0);
 static SUNXI_CCU_GATE(ahb1_mmc3_clk,   "ahb1-mmc3",    "ahb1",
-                     0x060, BIT(12), 0);
+                     0x060, BIT(11), 0);
 static SUNXI_CCU_GATE(ahb1_nand1_clk,  "ahb1-nand1",   "ahb1",
-                     0x060, BIT(13), 0);
+                     0x060, BIT(12), 0);
 static SUNXI_CCU_GATE(ahb1_nand0_clk,  "ahb1-nand0",   "ahb1",
                      0x060, BIT(13), 0);
 static SUNXI_CCU_GATE(ahb1_sdram_clk,  "ahb1-sdram",   "ahb1",
index 621b1cd996dbb4e5e4d1e621172e4a7e53e746f5..ac12f261f8caa3f76d0b8407be287506ed7ff7cc 100644 (file)
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
        [RST_BUS_OHCI0]         =  { 0x2c0, BIT(29) },
 
        [RST_BUS_VE]            =  { 0x2c4, BIT(0) },
-       [RST_BUS_TCON0]         =  { 0x2c4, BIT(3) },
+       [RST_BUS_TCON0]         =  { 0x2c4, BIT(4) },
        [RST_BUS_CSI]           =  { 0x2c4, BIT(8) },
        [RST_BUS_DE]            =  { 0x2c4, BIT(12) },
        [RST_BUS_DBG]           =  { 0x2c4, BIT(31) },
index 242c3370544e6a16480dc92c5d19ba9f17fc3d32..9ed46d188cb5ba3fee03ac61623bf8f531ec6dae 100644 (file)
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
 
        cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       kfree(priv);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+       kfree(priv);
 
        return 0;
 }
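
Swapping the two calls above avoids reading priv->cpu_dev after kfree(priv); the rule in miniature (illustrative types, not the cpufreq API):

	#include <stdio.h>
	#include <stdlib.h>

	struct priv { int cpu_dev; };

	static void remove_all_dynamic(int dev)
	{
		printf("dropping OPPs for cpu%d\n", dev);
	}

	int main(void)
	{
		struct priv *priv = malloc(sizeof(*priv));

		if (!priv)
			return 1;
		priv->cpu_dev = 0;
		/* Consume every field of priv first, then free it; the old
		 * order read priv->cpu_dev after the allocation was gone. */
		remove_all_dynamic(priv->cpu_dev);
		free(priv);
		return 0;
	}
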
index f6262435702094effeb6356bf5636728ffb06fd2..907a6db4d6c036fe1c5433d5bda095d157e166b0 100644 (file)
@@ -30,7 +30,7 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
        return 0;
 }
 
-static void cc_pm_go(struct cc_drvdata *drvdata) {}
+static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
 
 static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
 
index 00e954f22bc920257bb1645196206e5115afb13c..74401e0adb29ce044c3534745e5f0218ab563965 100644 (file)
@@ -30,6 +30,7 @@
 #define GPIO_REG_EDGE          0xA0
 
 struct mtk_gc {
+       struct irq_chip irq_chip;
        struct gpio_chip chip;
        spinlock_t lock;
        int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
-static struct irq_chip mediatek_gpio_irq_chip = {
-       .irq_unmask             = mediatek_gpio_irq_unmask,
-       .irq_mask               = mediatek_gpio_irq_mask,
-       .irq_mask_ack           = mediatek_gpio_irq_mask,
-       .irq_set_type           = mediatek_gpio_irq_type,
-};
-
 static int
 mediatek_gpio_xlate(struct gpio_chip *chip,
                    const struct of_phandle_args *spec, u32 *flags)
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
                return ret;
        }
 
+       rg->irq_chip.name = dev_name(dev);
+       rg->irq_chip.parent_device = dev;
+       rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+       rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+       rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+       rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
        if (mtk->gpio_irq) {
                /*
                 * Manually request the irq here instead of passing
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
                        return ret;
                }
 
-               ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+               ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
                                           0, handle_simple_irq, IRQ_TYPE_NONE);
                if (ret) {
                        dev_err(dev, "failed to add gpiochip_irqchip\n");
                        return ret;
                }
 
-               gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+               gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
                                             mtk->gpio_irq, NULL);
        }
 
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
        mtk->gpio_irq = irq_of_parse_and_map(np, 0);
        mtk->dev = dev;
        platform_set_drvdata(pdev, mtk);
-       mediatek_gpio_irq_chip.name = dev_name(dev);
 
        for (i = 0; i < MTK_BANK_CNT; i++) {
                ret = mediatek_gpio_bank_probe(dev, np, i);
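
Moving the irq_chip into struct mtk_gc, as the hunk above does, gives every bank its own descriptor instead of all banks overwriting one shared static. A userspace analog of the pattern (names hypothetical):

	#include <stdio.h>

	struct chip_ops { char name[16]; };	/* analog of struct irq_chip */

	struct bank {
		struct chip_ops ops;	/* per-instance, not static/shared */
		int id;
	};

	static void bank_probe(struct bank *b, int id)
	{
		b->id = id;
		snprintf(b->ops.name, sizeof(b->ops.name), "bank%d", id);
	}

	int main(void)
	{
		struct bank banks[3];

		for (int i = 0; i < 3; i++)
			bank_probe(&banks[i], i);
		for (int i = 0; i < 3; i++)
			printf("%s\n", banks[i].ops.name);	/* bank0 bank1 bank2 */
		return 0;
	}
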
index e9600b556f397babf8c472ceb2b012f2de97b42a..bcc6be4a5cb2ed38c000c6e15a99e91692c81c2c 100644 (file)
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
 {
        switch (gpio_type) {
        case PXA3XX_GPIO:
+       case MMP2_GPIO:
                return false;
 
        default:
index 9efa681d08781162b3e51c52564adc176f30081d..8d0d7f3dd5fb6d5db89b3042f773ed9766a20d0f 100644 (file)
@@ -411,6 +411,8 @@ struct amdgpu_fpriv {
        struct amdgpu_ctx_mgr   ctx_mgr;
 };
 
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
index e957e42c539a8e7d73d6e26c3a35f145d8da4e7f..fe1d7368c1e666b89648fa0f05cf8e8c8af6246a 100644 (file)
@@ -131,7 +131,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
-       int i, n;
+       int i;
        int last_valid_bit;
 
        if (adev->kfd.dev) {
@@ -142,7 +142,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                        .gpuvm_size = min(adev->vm_manager.max_pfn
                                          << AMDGPU_GPU_PAGE_SHIFT,
                                          AMDGPU_GMC_HOLE_START),
-                       .drm_render_minor = adev->ddev->render->index
+                       .drm_render_minor = adev->ddev->render->index,
+                       .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
+
                };
 
                /* this is going to have a few of the MSBs set that we need to
@@ -172,35 +174,20 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                                &gpu_resources.doorbell_aperture_size,
                                &gpu_resources.doorbell_start_offset);
 
-               if (adev->asic_type < CHIP_VEGA10) {
-                       kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
-                       return;
-               }
-
-               n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
-
-               for (i = 0; i < n; i += 2) {
-                       /* On SOC15 the BIF is involved in routing
-                        * doorbells using the low 12 bits of the
-                        * address. Communicate the assignments to
-                        * KFD. KFD uses two doorbell pages per
-                        * process in case of 64-bit doorbells so we
-                        * can use each doorbell assignment twice.
-                        */
-                       gpu_resources.sdma_doorbell[0][i] =
-                               adev->doorbell_index.sdma_engine[0] + (i >> 1);
-                       gpu_resources.sdma_doorbell[0][i+1] =
-                               adev->doorbell_index.sdma_engine[0] + 0x200 + (i >> 1);
-                       gpu_resources.sdma_doorbell[1][i] =
-                               adev->doorbell_index.sdma_engine[1] + (i >> 1);
-                       gpu_resources.sdma_doorbell[1][i+1] =
-                               adev->doorbell_index.sdma_engine[1] + 0x200 + (i >> 1);
-               }
-               /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
-                * SDMA, IH and VCN. So don't use them for the CP.
+               /* Since SOC15, the BIF statically uses the
+                * lower 12 bits of doorbell addresses for routing,
+                * based on settings in registers such as
+                * SDMA0_DOORBELL_RANGE.
+                * To route a doorbell to the CP engine, the lower
+                * 12 bits of its address have to be outside the ranges
+                * set for the SDMA, VCN, and IH blocks.
                 */
-               gpu_resources.reserved_doorbell_mask = 0x1e0;
-               gpu_resources.reserved_doorbell_val  = 0x0e0;
+               if (adev->asic_type >= CHIP_VEGA10) {
+                       gpu_resources.non_cp_doorbells_start =
+                                       adev->doorbell_index.first_non_cp;
+                       gpu_resources.non_cp_doorbells_end =
+                                       adev->doorbell_index.last_non_cp;
+               }
 
                kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
        }
index d7b10d79f1debdcb5e8f254dd69c7d0b7119e730..1921dec3df7aba0bad7878023bc71b8225081148 100644 (file)
@@ -204,38 +204,25 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 }
 
 
-/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
  *  reservation object.
  *
  * @bo: [IN] Remove eviction fence(s) from this BO
- * @ef: [IN] If ef is specified, then this eviction fence is removed if it
+ * @ef: [IN] This eviction fence is removed if it
  *  is present in the shared list.
- * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
- *  from BO's reservation object shared list.
- * @ef_count: [OUT] Number of fences in ef_list.
  *
- * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
- *  called to restore the eviction fences and to avoid memory leak. This is
- *  useful for shared BOs.
  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
  */
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
-                                       struct amdgpu_amdkfd_fence *ef,
-                                       struct amdgpu_amdkfd_fence ***ef_list,
-                                       unsigned int *ef_count)
+                                       struct amdgpu_amdkfd_fence *ef)
 {
        struct reservation_object *resv = bo->tbo.resv;
        struct reservation_object_list *old, *new;
        unsigned int i, j, k;
 
-       if (!ef && !ef_list)
+       if (!ef)
                return -EINVAL;
 
-       if (ef_list) {
-               *ef_list = NULL;
-               *ef_count = 0;
-       }
-
        old = reservation_object_get_list(resv);
        if (!old)
                return 0;
@@ -254,8 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                f = rcu_dereference_protected(old->shared[i],
                                              reservation_object_held(resv));
 
-               if ((ef && f->context == ef->base.context) ||
-                   (!ef && to_amdgpu_amdkfd_fence(f)))
+               if (f->context == ef->base.context)
                        RCU_INIT_POINTER(new->shared[--j], f);
                else
                        RCU_INIT_POINTER(new->shared[k++], f);
@@ -263,21 +249,6 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
        new->shared_max = old->shared_max;
        new->shared_count = k;
 
-       if (!ef) {
-               unsigned int count = old->shared_count - j;
-
-               /* Alloc memory for count number of eviction fence pointers.
-                * Fill the ef_list array and ef_count
-                */
-               *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
-               *ef_count = count;
-
-               if (!*ef_list) {
-                       kfree(new);
-                       return -ENOMEM;
-               }
-       }
-
        /* Install the new fence list, seqcount provides the barriers */
        preempt_disable();
        write_seqcount_begin(&resv->seq);
@@ -291,46 +262,13 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 
                f = rcu_dereference_protected(new->shared[i],
                                              reservation_object_held(resv));
-               if (!ef)
-                       (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
-               else
-                       dma_fence_put(f);
+               dma_fence_put(f);
        }
        kfree_rcu(old, rcu);
 
        return 0;
 }
 
-/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
- *  reservation object.
- *
- * @bo: [IN] Add eviction fences to this BO
- * @ef_list: [IN] List of eviction fences to be added
- * @ef_count: [IN] Number of fences in ef_list.
- *
- * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
- *  function.
- */
-static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
-                               struct amdgpu_amdkfd_fence **ef_list,
-                               unsigned int ef_count)
-{
-       int i;
-
-       if (!ef_list || !ef_count)
-               return;
-
-       for (i = 0; i < ef_count; i++) {
-               amdgpu_bo_fence(bo, &ef_list[i]->base, true);
-               /* Re-adding the fence takes an additional reference. Drop that
-                * reference.
-                */
-               dma_fence_put(&ef_list[i]->base);
-       }
-
-       kfree(ef_list);
-}
-
 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
                                     bool wait)
 {
@@ -346,18 +284,8 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                goto validate_fail;
-       if (wait) {
-               struct amdgpu_amdkfd_fence **ef_list;
-               unsigned int ef_count;
-
-               ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
-                                                         &ef_count);
-               if (ret)
-                       goto validate_fail;
-
-               ttm_bo_wait(&bo->tbo, false, false);
-               amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
-       }
+       if (wait)
+               amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
 
 validate_fail:
        return ret;
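The remove/re-add dance is gone because amdgpu_bo_sync_wait() filters fences by owner when collecting them from the reservation object, so a wait issued as AMDGPU_FENCE_OWNER_KFD does not pull in the KFD eviction fence. A conceptual sketch of that filtering rule (not the kernel implementation; the owner token values here are illustrative):

	/* Illustrative owner tokens; the real ones live in amdgpu.h. */
	#define FENCE_OWNER_UNDEFINED	((void *)0ul)
	#define FENCE_OWNER_KFD		((void *)2ul)

	/* Skip KFD eviction fences unless the caller explicitly syncs
	 * to everything (owner == UNDEFINED); a KFD-owned wait must
	 * not trigger an eviction of its own process.
	 */
	static int should_sync_to(void *fence_owner, void *sync_owner)
	{
		if (fence_owner == FENCE_OWNER_KFD &&
		    sync_owner != FENCE_OWNER_UNDEFINED)
			return 0;
		return 1;
	}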
@@ -444,7 +372,6 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
 {
        int ret;
        struct kfd_bo_va_list *bo_va_entry;
-       struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_bo *bo = mem->bo;
        uint64_t va = mem->va;
        struct list_head *list_bo_va = &mem->bo_va_list;
@@ -484,14 +411,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                *p_bo_va_entry = bo_va_entry;
 
        /* Allocate new page tables if needed and validate
-        * them. Clearing of new page tables and validate need to wait
-        * on move fences. We don't want that to trigger the eviction
-        * fence, so remove it temporarily.
+        * them.
         */
-       amdgpu_amdkfd_remove_eviction_fence(pd,
-                                       vm->process_info->eviction_fence,
-                                       NULL, NULL);
-
        ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
        if (ret) {
                pr_err("Failed to allocate pts, err=%d\n", ret);
@@ -504,13 +425,9 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                goto err_alloc_pts;
        }
 
-       /* Add the eviction fence back */
-       amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
        return 0;
 
 err_alloc_pts:
-       amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
        list_del(&bo_va_entry->bo_list);
 err_vmadd:
@@ -809,24 +726,11 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_vm *vm = bo_va->base.vm;
-       struct amdgpu_bo *pd = vm->root.base.bo;
 
-       /* Remove eviction fence from PD (and thereby from PTs too as
-        * they share the resv. object). Otherwise during PT update
-        * job (see amdgpu_vm_bo_update_mapping), eviction fence would
-        * get added to job->sync object and job execution would
-        * trigger the eviction fence.
-        */
-       amdgpu_amdkfd_remove_eviction_fence(pd,
-                                           vm->process_info->eviction_fence,
-                                           NULL, NULL);
        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
 
        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
 
-       /* Add the eviction fence back */
-       amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
        amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
 
        return 0;
@@ -1002,7 +906,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
        }
-       ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
+       ret = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD,
+                                 false);
        if (ret)
                goto wait_pd_fail;
        amdgpu_bo_fence(vm->root.base.bo,
@@ -1389,8 +1293,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
         * attached
         */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
-                                       process_info->eviction_fence,
-                                       NULL, NULL);
+                                       process_info->eviction_fence);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));
 
@@ -1617,8 +1520,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
                amdgpu_amdkfd_remove_eviction_fence(mem->bo,
-                                               process_info->eviction_fence,
-                                                   NULL, NULL);
+                                               process_info->eviction_fence);
 
 unreserve_out:
        unreserve_bo_and_vms(&ctx, false, false);
@@ -1679,7 +1581,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
        }
 
        amdgpu_amdkfd_remove_eviction_fence(
-               bo, mem->process_info->eviction_fence, NULL, NULL);
+               bo, mem->process_info->eviction_fence);
        list_del_init(&mem->validate_list.head);
 
        if (size)
@@ -1945,16 +1847,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 
        amdgpu_sync_create(&sync);
 
-       /* Avoid triggering eviction fences when unmapping invalid
-        * userptr BOs (waits for all fences, doesn't use
-        * FENCE_OWNER_VM)
-        */
-       list_for_each_entry(peer_vm, &process_info->vm_list_head,
-                           vm_list_node)
-               amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
-                                               process_info->eviction_fence,
-                                               NULL, NULL);
-
        ret = process_validate_vms(process_info);
        if (ret)
                goto unreserve_out;
@@ -2015,10 +1907,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        ret = process_update_pds(process_info, &sync);
 
 unreserve_out:
-       list_for_each_entry(peer_vm, &process_info->vm_list_head,
-                           vm_list_node)
-               amdgpu_bo_fence(peer_vm->root.base.bo,
-                               &process_info->eviction_fence->base, true);
        ttm_eu_backoff_reservation(&ticket, &resv_list);
        amdgpu_sync_wait(&sync, false);
        amdgpu_sync_free(&sync);
index d85184b5b35cf8851a1459f0879433da0a40c274..7b526593eb77b46050aa435e677752c66ff48512 100644 (file)
@@ -124,6 +124,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
                struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
                unsigned num_rings;
+               unsigned num_rqs = 0;
 
                switch (i) {
                case AMDGPU_HW_IP_GFX:
@@ -166,12 +167,16 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                        break;
                }
 
-               for (j = 0; j < num_rings; ++j)
-                       rqs[j] = &rings[j]->sched.sched_rq[priority];
+               for (j = 0; j < num_rings; ++j) {
+                       if (!rings[j]->adev)
+                               continue;
+
+                       rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+               }
 
                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
                        r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-                                                 rqs, num_rings, &ctx->guilty);
+                                                 rqs, num_rqs, &ctx->guilty);
                if (r)
                        goto error_cleanup_entities;
        }
index dd9a4fb9ce39b03e781f4581d4675b2a5aafbd67..4ae3ff9a1d4c9ac923d6689687c4ed5def0cb8f5 100644 (file)
@@ -158,9 +158,6 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
        while (size) {
                uint32_t value;
 
-               if (*pos > adev->rmmio_size)
-                       goto end;
-
                if (read) {
                        value = RREG32(*pos >> 2);
                        r = put_user(value, (uint32_t *)buf);
index 1cfec06f81d4049a35534f73f90defa303bcad2e..68959b923f89589560cfefbe8200ba14d883a1c5 100644 (file)
@@ -71,6 +71,8 @@ struct amdgpu_doorbell_index {
                        uint32_t vce_ring6_7;
                } uvd_vce;
        };
+       uint32_t first_non_cp;
+       uint32_t last_non_cp;
        uint32_t max_assignment;
        /* Per engine SDMA doorbell size in dword */
        uint32_t sdma_doorbell_range;
@@ -143,6 +145,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
        AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3             = 0x18D,
        AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5             = 0x18E,
        AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7             = 0x18F,
+
+       AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP            = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
+       AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP             = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
+
        AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT            = 0x18F,
        AMDGPU_VEGA20_DOORBELL_INVALID                   = 0xFFFF
 } AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
@@ -222,6 +228,9 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
        AMDGPU_DOORBELL64_VCE_RING4_5             = 0xFE,
        AMDGPU_DOORBELL64_VCE_RING6_7             = 0xFF,
 
+       AMDGPU_DOORBELL64_FIRST_NON_CP            = AMDGPU_DOORBELL64_sDMA_ENGINE0,
+       AMDGPU_DOORBELL64_LAST_NON_CP             = AMDGPU_DOORBELL64_VCE_RING6_7,
+
        AMDGPU_DOORBELL64_MAX_ASSIGNMENT          = 0xFF,
        AMDGPU_DOORBELL64_INVALID                 = 0xFFFF
 } AMDGPU_DOORBELL64_ASSIGNMENT;
index 1c4595562f8fd29bc682764265e5d2fd472b1f2c..344967df31379295236a279305a5fe88d329ae4d 100644 (file)
@@ -184,61 +184,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
        return vrefresh;
 }
 
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
-                             u32 *p, u32 *u)
-{
-       u32 b_c = 0;
-       u32 i_c;
-       u32 tmp;
-
-       i_c = (i * r_c) / 100;
-       tmp = i_c >> p_b;
-
-       while (tmp) {
-               b_c++;
-               tmp >>= 1;
-       }
-
-       *u = (b_c + 1) / 2;
-       *p = i_c / (1 << (2 * (*u)));
-}
-
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
-       u32 k, a, ah, al;
-       u32 t1;
-
-       if ((fl == 0) || (fh == 0) || (fl > fh))
-               return -EINVAL;
-
-       k = (100 * fh) / fl;
-       t1 = (t * (k - 100));
-       a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
-       a = (a + 5) / 10;
-       ah = ((a * t) + 5000) / 10000;
-       al = a - ah;
-
-       *th = t - ah;
-       *tl = t + al;
-
-       return 0;
-}
-
-bool amdgpu_is_uvd_state(u32 class, u32 class2)
-{
-       if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
-               return true;
-       if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
-               return true;
-       if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
-               return true;
-       if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
-               return true;
-       if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
-               return true;
-       return false;
-}
-
 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
 {
        switch (sensor) {
@@ -949,39 +894,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
        return AMDGPU_PCIE_GEN1;
 }
 
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
-                                u16 asic_lanes,
-                                u16 default_lanes)
-{
-       switch (asic_lanes) {
-       case 0:
-       default:
-               return default_lanes;
-       case 1:
-               return 1;
-       case 2:
-               return 2;
-       case 4:
-               return 4;
-       case 8:
-               return 8;
-       case 12:
-               return 12;
-       case 16:
-               return 16;
-       }
-}
-
-u8 amdgpu_encode_pci_lane_width(u32 lanes)
-{
-       u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
-
-       if (lanes > 16)
-               return 0;
-
-       return encoded_lanes[lanes];
-}
-
 struct amd_vce_state*
 amdgpu_get_vce_clock_state(void *handle, u32 idx)
 {
index 2f61e9edb1c1a10f69322807bb3f75d76a5ae97b..e871e022c129249f3e23edbd5754803a3fe0a72f 100644 (file)
@@ -486,10 +486,6 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-bool amdgpu_is_uvd_state(u32 class, u32 class2);
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
-                             u32 *p, u32 *u);
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
 
 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
 
@@ -505,11 +501,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
                                                 enum amdgpu_pcie_gen asic_gen,
                                                 enum amdgpu_pcie_gen default_gen);
 
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
-                                u16 asic_lanes,
-                                u16 default_lanes);
-u8 amdgpu_encode_pci_lane_width(u32 lanes);
-
 struct amd_vce_state*
 amdgpu_get_vce_clock_state(void *handle, u32 idx);
 
index 7f3aa7b7e1d82d38f11b360ab96ab6004fd9178d..7419ea8a388b656fd9b55753fb1bb3c152f70037 100644 (file)
  * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
  * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
  * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
+ * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       29
+#define KMS_DRIVER_MINOR       30
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -1178,6 +1179,22 @@ static const struct file_operations amdgpu_driver_kms_fops = {
 #endif
 };
 
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
+{
+       struct drm_file *file;
+
+       if (!filp)
+               return -EINVAL;
+
+       if (filp->f_op != &amdgpu_driver_kms_fops)
+               return -EINVAL;
+
+       file = filp->private_data;
+       *fpriv = file->driver_priv;
+       return 0;
+}
+
 static bool
 amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
                                 bool in_vblank_irq, int *vpos, int *hpos,
index d0a5db777b6d24c0b505a3ea827877ee91728ccf..1c50be3ab8a965ff8ba404afa522f241cb28158f 100644 (file)
@@ -140,9 +140,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
   * Interrupt handler (VI), walk the IH ring.
  * Returns irq process return code.
  */
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
-                     void (*callback)(struct amdgpu_device *adev,
-                                      struct amdgpu_ih_ring *ih))
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
        u32 wptr;
 
@@ -162,7 +160,7 @@ restart_ih:
        rmb();
 
        while (ih->rptr != wptr) {
-               callback(adev, ih);
+               amdgpu_irq_dispatch(adev, ih);
                ih->rptr &= ih->ptr_mask;
        }
 
index 1ccb1831382a2699a91d69a8a441f0e4fc60bfbb..113a1ba13d4a4b0214e6d76f629e7a7dbc31f4f1 100644 (file)
@@ -69,8 +69,6 @@ struct amdgpu_ih_funcs {
 int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
                        unsigned ring_size, bool use_bus_addr);
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
-                     void (*callback)(struct amdgpu_device *adev,
-                                      struct amdgpu_ih_ring *ih));
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
 
 #endif
index 8bfb3dab46f711029018729db41bbf808c34261e..af4c3b1af3223647e218a99ef23129beaac41b39 100644 (file)
@@ -130,29 +130,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
 }
 
-/**
- * amdgpu_irq_callback - callback from the IH ring
- *
- * @adev: amdgpu device pointer
- * @ih: amdgpu ih ring
- *
- * Callback from IH ring processing to handle the entry at the current position
- * and advance the read pointer.
- */
-static void amdgpu_irq_callback(struct amdgpu_device *adev,
-                               struct amdgpu_ih_ring *ih)
-{
-       u32 ring_index = ih->rptr >> 2;
-       struct amdgpu_iv_entry entry;
-
-       entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
-       amdgpu_ih_decode_iv(adev, &entry);
-
-       trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
-
-       amdgpu_irq_dispatch(adev, &entry);
-}
-
 /**
  * amdgpu_irq_handler - IRQ handler
  *
@@ -170,7 +147,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
        struct amdgpu_device *adev = dev->dev_private;
        irqreturn_t ret;
 
-       ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
+       ret = amdgpu_ih_process(adev, &adev->irq.ih);
        if (ret == IRQ_HANDLED)
                pm_runtime_mark_last_busy(dev->dev);
        return ret;
@@ -188,7 +165,7 @@ static void amdgpu_irq_handle_ih1(struct work_struct *work)
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih1_work);
 
-       amdgpu_ih_process(adev, &adev->irq.ih1, amdgpu_irq_callback);
+       amdgpu_ih_process(adev, &adev->irq.ih1);
 }
 
 /**
@@ -203,7 +180,7 @@ static void amdgpu_irq_handle_ih2(struct work_struct *work)
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih2_work);
 
-       amdgpu_ih_process(adev, &adev->irq.ih2, amdgpu_irq_callback);
+       amdgpu_ih_process(adev, &adev->irq.ih2);
 }
 
 /**
@@ -394,14 +371,23 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
  * Dispatches IRQ to IP blocks.
  */
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
-                        struct amdgpu_iv_entry *entry)
+                        struct amdgpu_ih_ring *ih)
 {
-       unsigned client_id = entry->client_id;
-       unsigned src_id = entry->src_id;
+       u32 ring_index = ih->rptr >> 2;
+       struct amdgpu_iv_entry entry;
+       unsigned client_id, src_id;
        struct amdgpu_irq_src *src;
        bool handled = false;
        int r;
 
+       entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+       amdgpu_ih_decode_iv(adev, &entry);
+
+       trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+
+       client_id = entry.client_id;
+       src_id = entry.src_id;
+
        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
 
@@ -416,7 +402,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                          client_id, src_id);
 
        } else if ((src = adev->irq.client[client_id].sources[src_id])) {
-               r = src->funcs->process(adev, src, entry);
+               r = src->funcs->process(adev, src, &entry);
                if (r < 0)
                        DRM_ERROR("error processing interrupt (%d)\n", r);
                else if (r)
@@ -428,7 +414,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 
        /* Send it to amdkfd as well if it isn't already handled */
        if (!handled)
-               amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
+               amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
 }
 
 /**
index c27decfda494d7f6caaa430225ec584f74b30240..c718e94a55c9c2bec2478ef90ba908ef5cb4574f 100644 (file)
@@ -108,7 +108,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source);
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
-                        struct amdgpu_iv_entry *entry);
+                        struct amdgpu_ih_ring *ih);
 int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                      unsigned type);
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
index bc62bf41b7e9b428a66b88e7d67a7f684133f7dd..e860412043bb13cd26005e46df17f55ea419278f 100644 (file)
@@ -207,11 +207,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        if (!r) {
                acpi_status = amdgpu_acpi_init(adev);
                if (acpi_status)
-               dev_dbg(&dev->pdev->dev,
+                       dev_dbg(&dev->pdev->dev,
                                "Error during ACPI methods call\n");
        }
 
        if (amdgpu_device_is_px(dev)) {
+               dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
index 698fd8a2f775719ba53f3683dd0225cb5a825c38..889e443eeee7ef66e571ad0923bfeb072d41401b 100644 (file)
@@ -406,6 +406,7 @@ struct amdgpu_crtc {
        struct amdgpu_flip_work *pflip_works;
        enum amdgpu_flip_status pflip_status;
        int deferred_flip_completion;
+       u64 last_flip_vblank;
        /* pll sharing */
        struct amdgpu_atom_ss ss;
        bool ss_enabled;
index fd9c4beeaaa494c2480935de0014b6070660180a..ec9e45004bff3391d902ca13f1b2a732803d13e7 100644 (file)
@@ -1284,6 +1284,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                reservation_object_add_excl_fence(resv, fence);
 }
 
+/**
+ * amdgpu_bo_sync_wait - Wait for BO reservation fences
+ *
+ * @bo: buffer object
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_sync sync;
+       int r;
+
+       amdgpu_sync_create(&sync);
+       amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+       r = amdgpu_sync_wait(&sync, intr);
+       amdgpu_sync_free(&sync);
+
+       return r;
+}
+
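Call sites in this series use the helper with the BO already reserved; an illustrative snippet mirroring the KFD paths above:

	/* with bo->tbo.resv held */
	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
	if (r)
		return r;	/* wait failed or was interrupted */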
 /**
  * amdgpu_bo_gpu_offset - return GPU offset of bo
  * @bo:        amdgpu object for which we query the offset
index 9291c2f837e95b674dd535f35c4e7daa82cbcb7a..220a6a7b1bc155f93880ef12b914e6a5aa92eb4d 100644 (file)
@@ -266,6 +266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
index 1cafe8d83a4dbaa60915801a7849a81613ef69c0..0767a93e4d9136c9f79bd46f1d041e4e7433ca1a 100644 (file)
@@ -54,16 +54,20 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
                                                  enum drm_sched_priority priority)
 {
        struct file *filp = fget(fd);
-       struct drm_file *file;
        struct amdgpu_fpriv *fpriv;
        struct amdgpu_ctx *ctx;
        uint32_t id;
+       int r;
 
        if (!filp)
                return -EINVAL;
 
-       file = filp->private_data;
-       fpriv = file->driver_priv;
+       r = amdgpu_file_to_fpriv(filp, &fpriv);
+       if (r) {
+               fput(filp);
+               return r;
+       }
+
        idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
                amdgpu_ctx_priority_override(ctx, priority);
 
@@ -72,6 +76,39 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
        return 0;
 }
 
+static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
+                                                 int fd,
+                                                 unsigned ctx_id,
+                                                 enum drm_sched_priority priority)
+{
+       struct file *filp = fget(fd);
+       struct amdgpu_fpriv *fpriv;
+       struct amdgpu_ctx *ctx;
+       int r;
+
+       if (!filp)
+               return -EINVAL;
+
+       r = amdgpu_file_to_fpriv(filp, &fpriv);
+       if (r) {
+               fput(filp);
+               return r;
+       }
+
+       ctx = amdgpu_ctx_get(fpriv, ctx_id);
+
+       if (!ctx) {
+               fput(filp);
+               return -EINVAL;
+       }
+
+       amdgpu_ctx_priority_override(ctx, priority);
+       amdgpu_ctx_put(ctx);
+       fput(filp);
+
+       return 0;
+}
+
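For completeness, a hedged userspace sketch of driving the new op, assuming the matching ctx_id field added to union drm_amdgpu_sched alongside this series; the ioctl is privileged and error handling is minimal:

	#include <sys/ioctl.h>
	#include <amdgpu_drm.h>	/* assumed uapi include path */

	/* Hypothetical helper: override the priority of context ctx_id
	 * in the process that owns DRM fd victim_fd.
	 */
	static int override_ctx_priority(int master_fd, int victim_fd,
					 unsigned int ctx_id, int priority)
	{
		union drm_amdgpu_sched args = {0};

		args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
		args.in.fd = victim_fd;
		args.in.ctx_id = ctx_id;
		args.in.priority = priority;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH */

		return ioctl(master_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
	}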
 int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *filp)
 {
@@ -81,7 +118,7 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
        int r;
 
        priority = amdgpu_to_sched_priority(args->in.priority);
-       if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
+       if (priority == DRM_SCHED_PRIORITY_INVALID)
                return -EINVAL;
 
        switch (args->in.op) {
@@ -90,6 +127,12 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
                                                           args->in.fd,
                                                           priority);
                break;
+       case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+               r = amdgpu_sched_context_priority_override(adev,
+                                                          args->in.fd,
+                                                          args->in.ctx_id,
+                                                          priority);
+               break;
        default:
                DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
                r = -EINVAL;
index 7cd2336e29ff33bcf564e6e23cc0888c9dd7c84c..ead851413c0aa054c6c8e6a74d23a7cc739a5fa9 100644 (file)
@@ -652,12 +652,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
 
+#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
+#endif
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
@@ -698,8 +700,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        struct amdgpu_vm_bo_base *bo_base, *tmp;
        int r = 0;
 
-       vm->bulk_moveable &= list_empty(&vm->evicted);
-
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
@@ -828,7 +828,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
        WARN_ON(job->ibs[0].length_dw > 64);
        r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
-                            AMDGPU_FENCE_OWNER_UNDEFINED, false);
+                            AMDGPU_FENCE_OWNER_KFD, false);
        if (r)
                goto error_free;
 
@@ -1332,31 +1332,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
        }
 }
 
-
-/**
- * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- * @owner: fence owner
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                            void *owner)
-{
-       struct amdgpu_sync sync;
-       int r;
-
-       amdgpu_sync_create(&sync);
-       amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
-       r = amdgpu_sync_wait(&sync, true);
-       amdgpu_sync_free(&sync);
-
-       return r;
-}
-
 /**
  * amdgpu_vm_update_func - helper to call update function
  *
@@ -1451,7 +1426,8 @@ restart:
        params.adev = adev;
 
        if (vm->use_cpu_for_update) {
-               r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+               r = amdgpu_bo_sync_wait(vm->root.base.bo,
+                                       AMDGPU_FENCE_OWNER_VM, true);
                if (unlikely(r))
                        return r;
 
@@ -1772,9 +1748,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        params.adev = adev;
        params.vm = vm;
 
-       /* sync to everything on unmapping */
+       /* sync to everything except eviction fences on unmapping */
        if (!(flags & AMDGPU_PTE_VALID))
-               owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+               owner = AMDGPU_FENCE_OWNER_KFD;
 
        if (vm->use_cpu_for_update) {
                /* params.src is used as flag to indicate system Memory */
@@ -1784,7 +1760,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                /* Wait for PT BOs to be idle. PTs share the same resv. object
                 * as the root PD BO
                 */
-               r = amdgpu_vm_wait_pd(adev, vm, owner);
+               r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
                if (unlikely(r))
                        return r;
 
index db443ec53d3aeb5db36c09fdbf45f9287658f4d2..bea32f076b91c98dd346d178b73ea70f5dcfd845 100644 (file)
@@ -2980,7 +2980,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
-               unsigned long flags;
+       unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
        struct amdgpu_flip_work *works;
index b11a1c17a7f27e76cdfd4036a1a3331e3a93476a..73851ebb3833e5ce4cf9f51ba3aeede50ff919fa 100644 (file)
@@ -266,7 +266,8 @@ flr_done:
        }
 
        /* Trigger recovery for world switch failure if no TDR */
-       if (amdgpu_device_should_recover_gpu(adev))
+       if (amdgpu_device_should_recover_gpu(adev)
+               && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
                amdgpu_device_gpu_recover(adev, NULL);
 }
 
index 221f26e50322ed56e8da3ff0164a77e465ecf182..c69d51598cfe54e246022f5044bf3178dffbc8de 100644 (file)
@@ -32,7 +32,7 @@
 
 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
 {
-    u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+       u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
 
        tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
index 127b85983e8fbdaa3fd888c0fb75387db0cfdf43..c816e55d43a9a617cc0b8ef61b991cc03dd9cb04 100644 (file)
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
 
 static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
 {
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
 };
 
 static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
index 79c1a9bbcc215e450e4c1c439f0258a82608ca74..9d8df68893b9d8a1be221adc9e6f3ccddd59e916 100644 (file)
@@ -1436,7 +1436,7 @@ static int si_common_early_init(void *handle)
                        AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_HDP_MGCG;
-                       adev->pg_flags = 0;
+               adev->pg_flags = 0;
                adev->external_rev_id = (adev->rev_id == 0) ? 1 :
                                        (adev->rev_id == 1) ? 5 : 6;
                break;
index da58040fdbdc6f27c1cc3eee7c1b822fcce21baf..41e01a7f57a4822cce1a7dcb5b971fad9597d719 100644 (file)
@@ -6216,10 +6216,12 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
                        si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
                        if (current_link_speed == AMDGPU_PCIE_GEN2)
                                break;
+                       /* fall through */
                case AMDGPU_PCIE_GEN2:
                        if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                                break;
 #endif
+                       /* fall through */
                default:
                        si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
                        break;
index 4b5d60ea3e789fc5cf8393b129afe4071a7d76e0..a8e92638a2e83973c653b59d2ecc9279635828a3 100644 (file)
@@ -81,6 +81,10 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
        adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
        adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
        adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+
+       adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
+       adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;
+
        /* In unit of dword doorbell */
        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 4;
index 53716c593b2b808816f55aa3650eaaebf7b60125..0db84386252a4969ccf0e203963681a00b196186 100644 (file)
@@ -85,6 +85,10 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
        adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
        adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
        adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+
+       adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
+       adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
+
        adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
 }
index 8372556b52eb72d755c09052a6df715c28c7c312..c6c9530e704e03f8c5fdda16ca3bd2b15673f5ce 100644 (file)
@@ -134,12 +134,18 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
                 */
                q->doorbell_id = q->properties.queue_id;
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-               /* For SDMA queues on SOC15, use static doorbell
-                * assignments based on the engine and queue.
+               /* For SDMA queues on SOC15 with 8-byte doorbells, use static
+                * doorbell assignments based on the engine and queue id.
+                * The doorbell index distance between RLC (2*i) and (2*i+1)
+                * for an SDMA engine is 512.
                 */
-               q->doorbell_id = dev->shared_resources.sdma_doorbell
-                       [q->properties.sdma_engine_id]
-                       [q->properties.sdma_queue_id];
+               uint32_t *idx_offset =
+                               dev->shared_resources.sdma_doorbell_idx;
+
+               q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
+                       + (q->properties.sdma_queue_id & 1)
+                       * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+                       + (q->properties.sdma_queue_id >> 1);
        } else {
                /* For CP queues on SOC15 reserve a free doorbell ID */
                unsigned int found;
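A worked example of the SDMA doorbell computation (the idx_offset value is hypothetical; only the 512 mirror offset is fixed by this patch):

	/* idx_offset[sdma_engine_id = 1] = 0xF8 (hypothetical),
	 * sdma_queue_id = 3, i.e. an odd RLC (2*i+1) queue:
	 *
	 *   doorbell_id = 0xF8
	 *               + (3 & 1) * 512	(KFD_QUEUE_DOORBELL_MIRROR_OFFSET)
	 *               + (3 >> 1)
	 *               = 0xF8 + 0x200 + 0x1 = 0x2F9
	 *
	 * Odd queue ids land exactly one doorbell page above their even
	 * siblings, matching the SOC15 SDMA doorbell layout.
	 */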
index 12b66330fc6d3cefaa0d1cbbfc1b270f6c380cce..0eeee3c6d6dcd067482354905a80cf52766a4fb9 100644 (file)
 #define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
 #define KFD_CWSR_TMA_OFFSET PAGE_SIZE
 
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE               \
+       (KFD_MAX_NUM_OF_PROCESSES *                     \
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/*
+ * 512 (= 0x200) is the doorbell index distance between SDMA RLC (2*i) and
+ * (2*i+1) in the same SDMA engine on SOC15, which has 8-byte doorbells for
+ * SDMA. A distance of 512 8-byte doorbells (i.e. one page) ensures that the
+ * SDMA RLC (2*i+1) doorbells (in terms of the lower 12 address bits) lie
+ * exactly within the OFFSET and SIZE programmed in registers like
+ * BIF_SDMA0_DOORBELL_RANGE.
+ */
+#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
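The page arithmetic behind the constant: 512 doorbells * 8 bytes = 4096 bytes, exactly one 4 KiB page, so doorbell index d and index d + 512 share the same lower 12 address bits and both fall inside the same window programmed in BIF_SDMA*_DOORBELL_RANGE.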
+
+
 /*
  * Kernel module parameter to specify maximum number of supported queues per
  * device
  */
 extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE               \
-       (KFD_MAX_NUM_OF_PROCESSES *                     \
-                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
-
-#define KFD_KERNEL_QUEUE_SIZE 2048
 
 /* Kernel module parameter to specify the scheduling policy */
 extern int sched_policy;
index 80b36e860a0a8de2d30f4475b4802db3028b6cf2..4bdae78bab8e930bd5704c6344e0a68fe09d2126 100644 (file)
@@ -607,13 +607,17 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
        if (!qpd->doorbell_bitmap)
                return -ENOMEM;
 
-       /* Mask out any reserved doorbells */
-       for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
-               if ((dev->shared_resources.reserved_doorbell_mask & i) ==
-                   dev->shared_resources.reserved_doorbell_val) {
+       /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+       for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
+               if (i >= dev->shared_resources.non_cp_doorbells_start
+                       && i <= dev->shared_resources.non_cp_doorbells_end) {
                        set_bit(i, qpd->doorbell_bitmap);
-                       pr_debug("reserved doorbell 0x%03x\n", i);
+                       set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+                               qpd->doorbell_bitmap);
+                       pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
+                               i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
                }
+       }
 
        return 0;
 }
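With assumed values for illustration: if non_cp_doorbells_start were 0xF0 (SDMA engine 0) and non_cp_doorbells_end 0xFF (VCE ring 6/7, per the enum additions earlier in this patch), the loop would reserve doorbells 0xF0-0xFF plus their mirrors 0x2F0-0x2FF (i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET), leaving the rest of the per-process doorbell page for CP queues.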
index 3a6f595f295e5a1927e35c8c5eb9508370749cab..2f26581b93ff5c4bacf77f3ca9d5619a2d979e32 100644 (file)
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
                return;
        }
 
+       /* Update to correct count(s) if racing with vblank irq */
+       amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
 
        /* wake up userspace */
        if (amdgpu_crtc->event) {
-               /* Update to correct count(s) if racing with vblank irq */
-               drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
 
                /* page flip completed. clean up */
@@ -786,12 +785,13 @@ static int dm_suspend(void *handle)
        struct amdgpu_display_manager *dm = &adev->dm;
        int ret = 0;
 
+       WARN_ON(adev->dm.cached_state);
+       adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
        s3_handle_mst(adev->ddev, true);
 
        amdgpu_dm_irq_suspend(adev);
 
-       WARN_ON(adev->dm.cached_state);
-       adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
 
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
@@ -3790,7 +3790,6 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
  * check will succeed, and let DC implement proper check
  */
 static const uint32_t rgb_formats[] = {
-       DRM_FORMAT_RGB888,
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_RGBA8888,
@@ -4646,6 +4645,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        struct amdgpu_bo *abo;
        uint64_t tiling_flags, dcc_address;
        uint32_t target, target_vblank;
+       uint64_t last_flip_vblank;
+       bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
 
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -4678,10 +4679,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                struct dc_plane_state *dc_plane;
                struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
 
-               if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-                       handle_cursor_update(plane, old_plane_state);
+               /* Cursor plane is handled after stream updates */
+               if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
-               }
 
                if (!fb || !crtc || pcrtc != crtc)
                        continue;
@@ -4712,14 +4712,21 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                         */
                        abo = gem_to_amdgpu_bo(fb->obj[0]);
                        r = amdgpu_bo_reserve(abo, true);
-                       if (unlikely(r != 0)) {
+                       if (unlikely(r != 0))
                                DRM_ERROR("failed to reserve buffer before flip\n");
-                               WARN_ON(1);
-                       }
 
-                       /* Wait for all fences on this FB */
-                       WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
-                                                                                   MAX_SCHEDULE_TIMEOUT) < 0);
+                       /*
+                        * Wait for all fences on this FB. Use a bounded wait
+                        * to avoid a deadlock during GPU reset, when this
+                        * fence may never signal while we hold the
+                        * reservation lock for the BO.
+                        */
+                       r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
+                                                               true, false,
+                                                               msecs_to_jiffies(5000));
+                       if (unlikely(r == 0))
+                               DRM_ERROR("Waiting for fences timed out.");
+
 
                        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 
@@ -4799,7 +4806,31 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
         * hopefully eliminating dc_*_update structs in their entirety.
         */
        if (flip_count) {
-               target = (uint32_t)drm_crtc_vblank_count(pcrtc) + *wait_for_vblank;
+               if (!vrr_active) {
+                       /* Use old throttling in non-vrr fixed refresh rate mode
+                        * to keep flip scheduling based on target vblank counts
+                        * working in a backwards compatible way, e.g., for
+                        * clients using the GLX_OML_sync_control extension or
+                        * DRI3/Present extension with defined target_msc.
+                        */
+                       last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+               } else {
+                       /* For variable refresh rate mode only:
+                        * Use the vblank of the last completed flip to avoid
+                        * more than one VRR flip per video frame through
+                        * throttling, but allow flip programming anywhere in
+                        * the possibly large variable VRR vblank interval for
+                        * fine-grained flip timing control and more
+                        * opportunity to avoid stutter on late flip submission.
+                        */
+                       spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+                       last_flip_vblank = acrtc_attach->last_flip_vblank;
+                       spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+               }
+
+               target = (uint32_t)last_flip_vblank + *wait_for_vblank;
+
                /* Prepare wait for target vblank early - before the fence-waits */
                target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
                                amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
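The throttling arithmetic, with hypothetical counter values for the non-VRR path:

	/* last_flip_vblank = drm_crtc_vblank_count(pcrtc) = 1000,
	 * *wait_for_vblank = 1:
	 *	target        = 1000 + 1 = 1001
	 * current DRM count = 1000, hardware counter = 52:
	 *	target_vblank = 1001 - 1000 + 52 = 53
	 * i.e. wait until the hardware counter reaches 53 before
	 * programming the flip. The VRR path only changes where
	 * last_flip_vblank comes from.
	 */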
@@ -4874,6 +4905,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                mutex_unlock(&dm->dc_lock);
        }
 
+       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
+               if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                       handle_cursor_update(plane, old_plane_state);
+
 cleanup:
        kfree(flip);
        kfree(full);
@@ -5799,14 +5834,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
                old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
                num_plane = 0;
 
-               if (!new_dm_crtc_state->stream) {
-                       if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
-                               update_type = UPDATE_TYPE_FULL;
-                               goto cleanup;
-                       }
+               if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
+                       update_type = UPDATE_TYPE_FULL;
+                       goto cleanup;
+               }
 
+               if (!new_dm_crtc_state->stream)
                        continue;
-               }
 
                for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
                        new_plane_crtc = new_plane_state->crtc;
@@ -5817,6 +5851,11 @@ dm_determine_update_type_for_commit(struct dc *dc,
                        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                                continue;
 
+                       if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
+                               update_type = UPDATE_TYPE_FULL;
+                               goto cleanup;
+                       }
+
                        if (!state->allow_modeset)
                                continue;
 
@@ -5955,6 +5994,42 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
        }
 
+       /*
+        * Add all primary and overlay planes on the CRTC to the state
+        * whenever a plane is enabled to maintain correct z-ordering
+        * and to enable fast surface updates.
+        */
+       drm_for_each_crtc(crtc, dev) {
+               bool modified = false;
+
+               for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+                       if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                               continue;
+
+                       if (new_plane_state->crtc == crtc ||
+                           old_plane_state->crtc == crtc) {
+                               modified = true;
+                               break;
+                       }
+               }
+
+               if (!modified)
+                       continue;
+
+               drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+                       if (plane->type == DRM_PLANE_TYPE_CURSOR)
+                               continue;
+
+                       new_plane_state =
+                               drm_atomic_get_plane_state(state, plane);
+
+                       if (IS_ERR(new_plane_state)) {
+                               ret = PTR_ERR(new_plane_state);
+                               goto fail;
+                       }
+               }
+       }
+
        /* Remove exiting planes if they are modified */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
                ret = dm_update_plane_state(dc, state, plane,
index a1c56f29cfeb2fe745fe97550288e77b1dfccc6b..fd5266a58297d566247a8dbc9835f01cdcfbf6a7 100644 (file)
@@ -265,6 +265,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
                                        && id.enum_id == obj_id.enum_id)
                                return &bp->object_info_tbl.v1_4->display_path[i];
                }
+               /* fall through */
        case OBJECT_TYPE_CONNECTOR:
        case OBJECT_TYPE_GENERIC:
                /* Both Generic and Connector Object ID
@@ -277,6 +278,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
                                        && id.enum_id == obj_id.enum_id)
                                return &bp->object_info_tbl.v1_4->display_path[i];
                }
+               /* fall through */
        default:
                return NULL;
        }
index 52f838442e21082ae1d0fa20beed15905b7414c9..c68fbd55db3ca6f01c49b86e60a584dfe8d90ff4 100644 (file)
@@ -1138,6 +1138,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        /* pplib is notified if disp_num changed */
        dc->hwss.optimize_bandwidth(dc, context);
 
+       for (i = 0; i < context->stream_count; i++)
+               context->streams[i]->mode_changed = false;
+
        dc_release_state(dc->current_state);
 
        dc->current_state = context;
@@ -1623,13 +1626,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
                                        stream_update->adjust->v_total_min,
                                        stream_update->adjust->v_total_max);
 
-                       if (stream_update->periodic_vsync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
-                               pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
-                                       pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE0, &stream->periodic_vsync_config);
+                       if (stream_update->periodic_interrupt0 &&
+                                       dc->hwss.setup_periodic_interrupt)
+                               dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
 
-                       if (stream_update->enhanced_sync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
-                               pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
-                                       pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE1, &stream->enhanced_sync_config);
+                       if (stream_update->periodic_interrupt1 &&
+                                       dc->hwss.setup_periodic_interrupt)
+                               dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
 
                        if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
                                        stream_update->vrr_infopacket ||
index a798694992b9eb73168f0f5cfbca47203ec9be0a..5657cb3a2ad358c6729cacb9529d4cbfb857389b 100644 (file)
@@ -51,9 +51,19 @@ struct freesync_context {
        bool dummy;
 };
 
-union vline_config {
-       unsigned int line_number;
-       unsigned long long delta_in_ns;
+enum vertical_interrupt_ref_point {
+       START_V_UPDATE = 0,
+       START_V_SYNC,
+       INVALID_POINT
+
+       // For now, only v_update interrupt is used.
+       //START_V_BLANK,
+       //START_V_ACTIVE
+};
+
+struct periodic_interrupt_config {
+       enum vertical_interrupt_ref_point ref_point;
+       int lines_offset;
 };
 
 
@@ -106,8 +116,8 @@ struct dc_stream_state {
        /* DMCU info */
        unsigned int abm_level;
 
-       union vline_config periodic_vsync_config;
-       union vline_config enhanced_sync_config;
+       struct periodic_interrupt_config periodic_interrupt0;
+       struct periodic_interrupt_config periodic_interrupt1;
 
        /* from core_stream struct */
        struct dc_context *ctx;
@@ -158,8 +168,8 @@ struct dc_stream_update {
        struct dc_info_packet *hdr_static_metadata;
        unsigned int *abm_level;
 
-       union vline_config *periodic_vsync_config;
-       union vline_config *enhanced_sync_config;
+       struct periodic_interrupt_config *periodic_interrupt0;
+       struct periodic_interrupt_config *periodic_interrupt1;
 
        struct dc_crtc_timing_adjust *adjust;
        struct dc_info_packet *vrr_infopacket;
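A hedged sketch of how a display manager might populate the new fields (the surrounding stream-update plumbing is elided and the lines_offset value is hypothetical):

	struct periodic_interrupt_config cfg = {
		.ref_point = START_V_UPDATE,	/* only ref point used for now */
		.lines_offset = 16,		/* hypothetical */
	};
	struct dc_stream_update update = {
		.periodic_interrupt0 = &cfg,
	};
	/* Passing 'update' through the usual dc stream-update entry
	 * point reaches commit_planes_do_stream_update() above, which
	 * calls hwss.setup_periodic_interrupt(pipe_ctx, VLINE0).
	 */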
index 01e56f1a9f34d42b64ed88543dacbea3da4a281b..da96229db53a76a91e739d3f85bbadc8df78f689 100644 (file)
 
 #define MCP_DISABLE_ABM_IMMEDIATELY 255
 
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+{
+       struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+       uint32_t rampingBoundary = 0xFFFF;
+
+       REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+                       1, 80000);
+
+       /* set ramping boundary */
+       REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+       /* setDMCUParam_Pipe */
+       REG_UPDATE_2(MASTER_COMM_CMD_REG,
+                       MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+                       MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+       /* notifyDMCUMsg */
+       REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+       return true;
+}
 
 static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
 {
@@ -175,7 +196,6 @@ static void dmcu_set_backlight_level(
        uint32_t controller_id)
 {
        unsigned int backlight_8_bit = 0;
-       uint32_t rampingBoundary = 0xFFFF;
        uint32_t s2;
 
        if (backlight_pwm_u16_16 & 0x10000)
@@ -185,16 +205,7 @@ static void dmcu_set_backlight_level(
                // Take MSB of fractional part since backlight is not max
                backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
 
-       /* set ramping boundary */
-       REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
-
-       /* setDMCUParam_Pipe */
-       REG_UPDATE_2(MASTER_COMM_CMD_REG,
-                       MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
-                       MASTER_COMM_CMD_REG_BYTE1, controller_id);
-
-       /* notifyDMCUMsg */
-       REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+       dce_abm_set_pipe(&abm_dce->base, controller_id);
 
        /* waitDMCUReadyForCmd */
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -309,16 +320,7 @@ static bool dce_abm_immediate_disable(struct abm *abm)
 {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
 
-       REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
-                       1, 80000);
-
-       /* setDMCUParam_ABMLevel */
-       REG_UPDATE_2(MASTER_COMM_CMD_REG,
-                       MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
-                       MASTER_COMM_CMD_REG_BYTE1, MCP_DISABLE_ABM_IMMEDIATELY);
-
-       /* notifyDMCUMsg */
-       REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+       dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
 
        abm->stored_backlight_registers.BL_PWM_CNTL =
                REG_READ(BL_PWM_CNTL);
@@ -419,6 +421,7 @@ static const struct abm_funcs dce_funcs = {
        .abm_init = dce_abm_init,
        .set_abm_level = dce_abm_set_level,
        .init_backlight = dce_abm_init_backlight,
+       .set_pipe = dce_abm_set_pipe,
        .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
        .get_current_backlight = dce_abm_get_current_backlight,
        .get_target_backlight = dce_abm_get_target_backlight,
index bbe051736a1826be78599264e4aedc53e762b238..6e142c2db986537a67cc5dd78615052dd6ba37e0 100644 (file)
@@ -696,6 +696,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
 {
        struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
        struct dm_pp_power_level_change_request level_change_req;
+       int patched_disp_clk = context->bw.dce.dispclk_khz;
+
+       /* TODO: W/A for dal3 Linux - pad dispclk by 15% when DFS bypass is
+        * inactive; investigate why this works */
+       if (!clk_mgr_dce->dfs_bypass_active)
+               patched_disp_clk = patched_disp_clk * 115 / 100;
 
        level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
        /* get max clock state from PPLIB */
@@ -705,9 +710,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
                        clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
        }
 
-       if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
-               context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
-               clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+       if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
+               context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+               clk_mgr->clks.dispclk_khz = patched_disp_clk;
        }
        dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
 }
index 85686d9176364e0373248c4808eb631b5169173a..a24a2bda8656baf9c9ee81e215535824f17798cd 100644 (file)
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                sign = 1;
                floating = 1;
-               /* no break */
+               /* fall through */
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
                grph_depth = 3;
index acd418515346c49fb263f98ae27995f957f959db..a6b80fdaa666a12240b1ddfc0268a31c49aafd61 100644 (file)
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
                struct dc *dc,
                struct dc_state *context);
 
+void dce100_optimize_bandwidth(
+               struct dc *dc,
+               struct dc_state *context);
+
 bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
                                        struct dc_bios *dcb,
                                        enum pipe_gating_control power_gating);
index e1b285ea01ac30791343ecdc510ae9462542c9fa..5e4db3712eefaf2c5b40c58ecc44d6eeded52d4e 100644 (file)
@@ -1300,6 +1300,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        struct drr_params params = {0};
        unsigned int event_triggers = 0;
 
+       if (dc->hwss.disable_stream_gating) {
+               dc->hwss.disable_stream_gating(dc, pipe_ctx);
+       }
+
        if (pipe_ctx->stream_res.audio != NULL) {
                struct audio_output audio_output;
 
@@ -1329,10 +1333,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        if (!pipe_ctx->stream->apply_seamless_boot_optimization)
                dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
 
-       if (pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt)
-               pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt(
-                                               pipe_ctx->stream_res.tg,
-                                                       &stream->timing);
+       if (dc->hwss.setup_vupdate_interrupt)
+               dc->hwss.setup_vupdate_interrupt(pipe_ctx);
 
        params.vertical_total_min = stream->adjust.v_total_min;
        params.vertical_total_max = stream->adjust.v_total_max;
@@ -1521,6 +1523,14 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
        struct dc_link *edp_link = get_link_for_edp(dc);
        bool can_edp_fast_boot_optimize = false;
        bool apply_edp_fast_boot_optimization = false;
+       bool can_apply_seamless_boot = false;
+
+       for (i = 0; i < context->stream_count; i++) {
+               if (context->streams[i]->apply_seamless_boot_optimization) {
+                       can_apply_seamless_boot = true;
+                       break;
+               }
+       }
 
        if (edp_link) {
                /* this seems to cause blank screens on DCE8 */
@@ -1549,7 +1559,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                }
        }
 
-       if (!apply_edp_fast_boot_optimization) {
+       if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) {
                if (edp_link_to_turnoff) {
                        /*turn off backlight before DP_blank and encoder powered down*/
                        dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
@@ -2676,6 +2686,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .set_static_screen_control = set_static_screen_control,
        .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
        .enable_stream_timing = dce110_enable_stream_timing,
+       .disable_stream_gating = NULL,
+       .enable_stream_gating = NULL,
        .setup_stereo = NULL,
        .set_avmute = dce110_set_avmute,
        .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
index a60a90e68d91837d67c9331217f7bdbada9f7b07..c4543178ba2095a84123dd15c396e216fc27162b 100644 (file)
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
        dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
        dc->hwss.pipe_control_lock = dce_pipe_control_lock;
        dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
-       dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+       dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
 
index 2eca81b5cf2f770cc08bba6eecb27d9d2afdff90..c109ace96be985764fadf060b6f19730908df177 100644 (file)
@@ -792,9 +792,22 @@ bool dce80_validate_bandwidth(
        struct dc *dc,
        struct dc_state *context)
 {
-       /* TODO implement when needed but for now hardcode max value*/
-       context->bw.dce.dispclk_khz = 681000;
-       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+       int i;
+       bool at_least_one_pipe = false;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].stream)
+                       at_least_one_pipe = true;
+       }
+
+       if (at_least_one_pipe) {
+               /* TODO implement when needed but for now hardcode max value*/
+               context->bw.dce.dispclk_khz = 681000;
+               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+       } else {
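+               /* No active streams, so no display bandwidth is required */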
+               context->bw.dce.dispclk_khz = 0;
+               context->bw.dce.yclk_khz = 0;
+       }
 
        return true;
 }
index 117d9d8227f7429f09871abfa6823ebccc6849e3..d1a8f1c302a9603199e4690e8f7141ff984272a2 100644 (file)
@@ -959,9 +959,25 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
 static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 {
        int i;
+       bool can_apply_seamless_boot = false;
+
+       for (i = 0; i < context->stream_count; i++) {
+               if (context->streams[i]->apply_seamless_boot_optimization) {
+                       can_apply_seamless_boot = true;
+                       break;
+               }
+       }
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct timing_generator *tg = dc->res_pool->timing_generators[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               /* We assume pipe_ctx is not mapped irregularly to a
+                * non-preferred front end. If pipe_ctx->stream is not NULL,
+                * the pipe will be used, so don't disable it.
+                */
+               if (pipe_ctx->stream != NULL)
+                       continue;
 
                if (tg->funcs->is_tg_enabled(tg))
                        tg->funcs->lock(tg);
@@ -975,7 +991,9 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
                }
        }
 
-       dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+       /* Cannot reset the MPC mux during seamless boot */
+       if (!can_apply_seamless_boot)
+               dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct timing_generator *tg = dc->res_pool->timing_generators[i];
@@ -983,6 +1001,16 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
                struct dpp *dpp = dc->res_pool->dpps[i];
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
+               // W/A for issue with dc_post_update_surfaces_to_stream
+               hubp->power_gated = true;
+
+               /* We assume pipe_ctx is not mapped irregularly to a
+                * non-preferred front end. If pipe_ctx->stream is not NULL,
+                * the pipe will be used, so don't disable it.
+                */
+               if (pipe_ctx->stream != NULL)
+                       continue;
+
                dpp->funcs->dpp_reset(dpp);
 
                pipe_ctx->stream_res.tg = tg;
@@ -1137,11 +1165,13 @@ static void reset_hw_ctx_wrap(
                        struct clock_source *old_clk = pipe_ctx_old->clock_source;
 
                        reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+                       if (dc->hwss.enable_stream_gating) {
+                               dc->hwss.enable_stream_gating(dc, pipe_ctx);
+                       }
                        if (old_clk)
                                old_clk->funcs->cs_power_down(old_clk);
                }
        }
-
 }
 
 static bool patch_address_for_sbs_tb_stereo(
@@ -2162,8 +2192,10 @@ static void dcn10_blank_pixel_data(
        if (!blank) {
                if (stream_res->tg->funcs->set_blank)
                        stream_res->tg->funcs->set_blank(stream_res->tg, blank);
-               if (stream_res->abm)
+               if (stream_res->abm) {
+                       stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
                        stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
+               }
        } else if (blank) {
                if (stream_res->abm)
                        stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
@@ -2661,8 +2693,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
                .mirror = pipe_ctx->plane_state->horizontal_mirror
        };
 
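+       /* Fold the plane's destination offset into the cursor hotspot rather
+        * than shifting the cursor position itself.
+        */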
-       pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
-       pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
+       pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+       pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
 
        if (pipe_ctx->plane_state->address.type
                        == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2709,6 +2741,147 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.dpp, &opt_attr);
 }
 
+/**
+ * apply_front_porch_workaround  TODO FPGA still need?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not
+ * been fixed: keep the front porch at a minimum of 2 lines for interlaced
+ * modes and 1 for progressive.
+ */
+static void apply_front_porch_workaround(
+       struct dc_crtc_timing *timing)
+{
+       if (timing->flags.INTERLACE == 1) {
+               if (timing->v_front_porch < 2)
+                       timing->v_front_porch = 2;
+       } else {
+               if (timing->v_front_porch < 1)
+                       timing->v_front_porch = 1;
+       }
+}
+
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+{
+       struct timing_generator *optc = pipe_ctx->stream_res.tg;
+       const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+       struct dc_crtc_timing patched_crtc_timing;
+       int vesa_sync_start;
+       int asic_blank_end;
+       int interlace_factor;
+       int vertical_line_start;
+
+       patched_crtc_timing = *dc_crtc_timing;
+       apply_front_porch_workaround(&patched_crtc_timing);
+
+       interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
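+       /* asic_blank_end: lines from the start of vsync to the end of
+        * vertical blank (sync width plus back porch, doubled for interlace).
+        * VUPDATE fires vstartup_start lines before blank ends, so the result
+        * is VUPDATE's line offset measured from vsync.
+        */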
+       vesa_sync_start = patched_crtc_timing.v_addressable +
+                       patched_crtc_timing.v_border_bottom +
+                       patched_crtc_timing.v_front_porch;
+
+       asic_blank_end = (patched_crtc_timing.v_total -
+                       vesa_sync_start -
+                       patched_crtc_timing.v_border_top)
+                       * interlace_factor;
+
+       vertical_line_start = asic_blank_end -
+                       optc->dlg_otg_param.vstartup_start + 1;
+
+       return vertical_line_start;
+}
+
+static void calc_vupdate_position(
+               struct pipe_ctx *pipe_ctx,
+               uint32_t *start_line,
+               uint32_t *end_line)
+{
+       const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+       int vline_int_offset_from_vupdate =
+                       pipe_ctx->stream->periodic_interrupt0.lines_offset;
+       int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+       int start_position;
+
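+       /* Bias the requested offset one line toward VUPDATE */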
+       if (vline_int_offset_from_vupdate > 0)
+               vline_int_offset_from_vupdate--;
+       else if (vline_int_offset_from_vupdate < 0)
+               vline_int_offset_from_vupdate++;
+
+       start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+
+       if (start_position >= 0)
+               *start_line = start_position;
+       else
+               *start_line = dc_crtc_timing->v_total + start_position - 1;
+
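+       /* The interrupt window is two lines wide; wrap it back to the top of
+        * the frame if it would run past v_total.
+        */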
+       *end_line = *start_line + 2;
+
+       if (*end_line >= dc_crtc_timing->v_total)
+               *end_line = 2;
+}
+
+static void cal_vline_position(
+               struct pipe_ctx *pipe_ctx,
+               enum vline_select vline,
+               uint32_t *start_line,
+               uint32_t *end_line)
+{
+       enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
+
+       if (vline == VLINE0)
+               ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
+       else if (vline == VLINE1)
+               ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
+
+       switch (ref_point) {
+       case START_V_UPDATE:
+               calc_vupdate_position(
+                               pipe_ctx,
+                               start_line,
+                               end_line);
+               break;
+       case START_V_SYNC:
+               // Nothing to do: the reference point is vsync, which is line 0
+               break;
+       default:
+               ASSERT(0);
+               break;
+       }
+}
+
+static void dcn10_setup_periodic_interrupt(
+               struct pipe_ctx *pipe_ctx,
+               enum vline_select vline)
+{
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
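+       /* VLINE0 programs a start/end window computed from the stream's
+        * reference point; VLINE1 takes a raw line offset directly.
+        */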
+       if (vline == VLINE0) {
+               uint32_t start_line = 0;
+               uint32_t end_line = 0;
+
+               cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+
+               tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
+
+       } else if (vline == VLINE1) {
+               pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
+                               tg,
+                               pipe_ctx->stream->periodic_interrupt1.lines_offset);
+       }
+}
+
+static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+{
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+       int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+
+       if (start_line < 0) {
+               ASSERT(0);
+               start_line = 0;
+       }
+
+       if (tg->funcs->setup_vertical_interrupt2)
+               tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
 static const struct hw_sequencer_funcs dcn10_funcs = {
        .program_gamut_remap = program_gamut_remap,
        .init_hw = dcn10_init_hw,
@@ -2756,7 +2929,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
        .set_cursor_attribute = dcn10_set_cursor_attribute,
-       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
+       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+       .disable_stream_gating = NULL,
+       .enable_stream_gating = NULL,
+       .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+       .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt
 };
 
 
index f8eea10e4c6453412a8dd22f763227dd76351142..6d66084df55f53cd6e370997f471f49528dcf9fd 100644 (file)
@@ -81,4 +81,6 @@ struct pipe_ctx *find_top_pipe_for_stream(
                struct dc_state *context,
                const struct dc_stream_state *stream);
 
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+
 #endif /* __DC_HWSS_DCN10_H__ */
index 2f78a84f0dcbb9a0fdb9ecda3824e0ffb8e413fc..0345d51e9d6f39a570c6d1b753913dba1708b10b 100644 (file)
@@ -92,134 +92,36 @@ static void optc1_disable_stereo(struct timing_generator *optc)
                OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
 }
 
-static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
-{
-       struct dc_crtc_timing patched_crtc_timing;
-       int vesa_sync_start;
-       int asic_blank_end;
-       int interlace_factor;
-       int vertical_line_start;
-
-       patched_crtc_timing = *dc_crtc_timing;
-       optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
-       vesa_sync_start = patched_crtc_timing.h_addressable +
-                       patched_crtc_timing.h_border_right +
-                       patched_crtc_timing.h_front_porch;
-
-       asic_blank_end = patched_crtc_timing.h_total -
-                       vesa_sync_start -
-                       patched_crtc_timing.h_border_left;
-
-       interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
-       vesa_sync_start = patched_crtc_timing.v_addressable +
-                       patched_crtc_timing.v_border_bottom +
-                       patched_crtc_timing.v_front_porch;
-
-       asic_blank_end = (patched_crtc_timing.v_total -
-                       vesa_sync_start -
-                       patched_crtc_timing.v_border_top)
-                       * interlace_factor;
-
-       vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-       if (vertical_line_start < 0) {
-               ASSERT(0);
-               vertical_line_start = 0;
-       }
-
-       return vertical_line_start;
-}
-
-static void calc_vline_position(
+void optc1_setup_vertical_interrupt0(
                struct timing_generator *optc,
-               const struct dc_crtc_timing *dc_crtc_timing,
-               unsigned long long vsync_delta,
-               uint32_t *start_line,
-               uint32_t *end_line)
+               uint32_t start_line,
+               uint32_t end_line)
 {
-       unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
-       unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
-       uint32_t req_delta_lines = (uint32_t) div64_u64(
-                       (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
-                                                               dc_crtc_timing->h_total);
-
-       uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
-
-       if (req_delta_lines != 0)
-                       req_delta_lines--;
-
-               if (req_delta_lines > vsync_line)
-                       *start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
-               else
-                       *start_line = vsync_line - req_delta_lines;
-
-               *end_line = *start_line + 2;
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
 
-               if (*end_line >= dc_crtc_timing->v_total)
-                       *end_line = 2;
+       REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+                       OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+                       OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
 }
 
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt1(
                struct timing_generator *optc,
-               const struct dc_crtc_timing *dc_crtc_timing,
-               enum vline_select vline,
-               const union vline_config *vline_config)
+               uint32_t start_line)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
-       uint32_t start_line = 0;
-       uint32_t end_line = 0;
-
-       switch (vline) {
-       case VLINE0:
-               calc_vline_position(optc, dc_crtc_timing, vline_config->delta_in_ns, &start_line, &end_line);
-               REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
-                               OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
-                               OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
-               break;
-       case VLINE1:
-               REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
-                                       OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config->line_number);
-               break;
-       default:
-               break;
-       }
+
+       REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
+                               OTG_VERTICAL_INTERRUPT1_LINE_START, start_line);
 }
 
-void optc1_program_vupdate_interrupt(
+void optc1_setup_vertical_interrupt2(
                struct timing_generator *optc,
-               const struct dc_crtc_timing *dc_crtc_timing)
+               uint32_t start_line)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
-       int32_t vertical_line_start;
-       uint32_t asic_blank_end;
-       uint32_t vesa_sync_start;
-       struct dc_crtc_timing patched_crtc_timing;
-
-       patched_crtc_timing = *dc_crtc_timing;
-       optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
-       /* asic_h_blank_end = HsyncWidth + HbackPorch =
-        * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
-        * vesa.h_left_border
-        */
-       vesa_sync_start = patched_crtc_timing.h_addressable +
-                       patched_crtc_timing.h_border_right +
-                       patched_crtc_timing.h_front_porch;
-
-       asic_blank_end = patched_crtc_timing.h_total -
-                       vesa_sync_start -
-                       patched_crtc_timing.h_border_left;
-
-       /* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
-        * program the reg for interrupt postition.
-        */
-       vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
-       if (vertical_line_start < 0)
-               vertical_line_start = 0;
 
        REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
-                       OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+                       OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
 }
 
 /**
@@ -1480,8 +1382,9 @@ bool optc1_get_crc(struct timing_generator *optc,
 static const struct timing_generator_funcs dcn10_tg_funcs = {
                .validate_timing = optc1_validate_timing,
                .program_timing = optc1_program_timing,
-               .program_vline_interrupt = optc1_program_vline_interrupt,
-               .program_vupdate_interrupt = optc1_program_vupdate_interrupt,
+               .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+               .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+               .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
                .program_global_sync = optc1_program_global_sync,
                .enable_crtc = optc1_enable_crtc,
                .disable_crtc = optc1_disable_crtc,
index 24452f11c5981da723d8c90634887557239ad5f6..4eb9a898c237daadb7789c31f5fc46744a556bfb 100644 (file)
@@ -483,11 +483,16 @@ void optc1_program_timing(
        const struct dc_crtc_timing *dc_crtc_timing,
        bool use_vbios);
 
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt0(
                struct timing_generator *optc,
-               const struct dc_crtc_timing *dc_crtc_timing,
-               enum vline_select vline,
-               const union vline_config *vline_config);
+               uint32_t start_line,
+               uint32_t end_line);
+void optc1_setup_vertical_interrupt1(
+               struct timing_generator *optc,
+               uint32_t start_line);
+void optc1_setup_vertical_interrupt2(
+               struct timing_generator *optc,
+               uint32_t start_line);
 
 void optc1_program_global_sync(
                struct timing_generator *optc);
index abc961c0906ea7729d49f0606dcfe6150d1a584e..86dc39a0240872545e5a2b98330f9ecd1f634227 100644 (file)
@@ -46,6 +46,7 @@ struct abm_funcs {
        void (*abm_init)(struct abm *abm);
        bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
        bool (*set_abm_immediate_disable)(struct abm *abm);
+       bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
        bool (*init_backlight)(struct abm *abm);
 
        /* backlight_pwm_u16_16 is unsigned 32 bit,
index 03ae941895f3df038a5d3dc9632185b0970875bd..c25f7df7b5e3f65251058f39751410f0e13c5074 100644 (file)
@@ -134,14 +134,6 @@ struct dc_crtc_timing;
 
 struct drr_params;
 
-union vline_config;
-
-
-enum vline_select {
-       VLINE0,
-       VLINE1,
-       VLINE2
-};
 
 struct timing_generator_funcs {
        bool (*validate_timing)(struct timing_generator *tg,
@@ -149,14 +141,17 @@ struct timing_generator_funcs {
        void (*program_timing)(struct timing_generator *tg,
                                                        const struct dc_crtc_timing *timing,
                                                        bool use_vbios);
-       void (*program_vline_interrupt)(
+       void (*setup_vertical_interrupt0)(
+                       struct timing_generator *optc,
+                       uint32_t start_line,
+                       uint32_t end_line);
+       void (*setup_vertical_interrupt1)(
+                       struct timing_generator *optc,
+                       uint32_t start_line);
+       void (*setup_vertical_interrupt2)(
                        struct timing_generator *optc,
-                       const struct dc_crtc_timing *dc_crtc_timing,
-                       enum vline_select vline,
-                       const union vline_config *vline_config);
+                       uint32_t start_line);
 
-       void (*program_vupdate_interrupt)(struct timing_generator *optc,
-                       const struct dc_crtc_timing *dc_crtc_timing);
        bool (*enable_crtc)(struct timing_generator *tg);
        bool (*disable_crtc)(struct timing_generator *tg);
        bool (*is_counter_moving)(struct timing_generator *tg);
index 341b4810288c25315fc2d6dc1ef1b03487b4b412..7676f25216b191c22b1616e1622e8582e899a4dd 100644 (file)
@@ -38,6 +38,11 @@ enum pipe_gating_control {
        PIPE_GATING_CONTROL_INIT
 };
 
+enum vline_select {
+       VLINE0,
+       VLINE1
+};
+
 struct dce_hwseq_wa {
        bool blnd_crtc_trigger;
        bool DEGVIDCN10_253;
@@ -68,6 +73,10 @@ struct stream_resource;
 
 struct hw_sequencer_funcs {
 
+       void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+       void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
        void (*init_hw)(struct dc *dc);
 
        void (*init_pipes)(struct dc *dc, struct dc_state *context);
@@ -220,6 +229,9 @@ struct hw_sequencer_funcs {
        void (*set_cursor_attribute)(struct pipe_ctx *pipe);
        void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
 
+       void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
+       void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
+
 };
 
 void color_space_to_black_color(
index 4f501ddcfb8d48eb89119164a15196d2e7e474b5..34d6fdcb32e2f15c07a8c547e9360878ce6d184c 100644 (file)
 #define INTERNAL_REV_RAVEN_A0             0x00    /* First spin of Raven */
 #define RAVEN_A0 0x01
 #define RAVEN_B0 0x21
+#define PICASSO_A0 0x41
 #if defined(CONFIG_DRM_AMD_DC_DCN1_01)
 /* DCN1_01 */
 #define RAVEN2_A0 0x81
 
 #define        FAMILY_UNKNOWN 0xFF
 
+
+
 #endif /* __DAL_ASIC_ID_H__ */
index 3ba87b0762875c513f1c7bf59865ec1579e53ba7..038b88221c5fc97c2e8887ef5023d664eecc18e9 100644 (file)
@@ -165,18 +165,11 @@ struct iram_table_v_2_2 {
 };
 #pragma pack(pop)
 
-static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
-{
-       return (uint16_t)(backlight_8bit * 0x101);
-}
-
 static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
                struct iram_table_v_2 *table)
 {
        unsigned int i;
        unsigned int num_entries = NUM_BL_CURVE_SEGS;
-       unsigned int query_input_8bit;
-       unsigned int query_output_8bit;
        unsigned int lut_index;
 
        table->backlight_thresholds[0] = 0;
@@ -194,16 +187,13 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
         * format U4.10.
         */
        for (i = 1; i+1 < num_entries; i++) {
-               query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
                lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
                ASSERT(lut_index < params.backlight_lut_array_size);
-               query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
 
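+               /* Thresholds are spaced evenly across the full 16-bit range;
+                * offsets come straight from the 16-bit LUT. Both are stored
+                * big-endian for the DMCU.
+                */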
                table->backlight_thresholds[i] =
-                               backlight_8_to_16(query_input_8bit);
+                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
                table->backlight_offsets[i] =
-                               backlight_8_to_16(query_output_8bit);
+                       cpu_to_be16(params.backlight_lut_array[lut_index]);
        }
 }
 
@@ -212,8 +202,6 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
 {
        unsigned int i;
        unsigned int num_entries = NUM_BL_CURVE_SEGS;
-       unsigned int query_input_8bit;
-       unsigned int query_output_8bit;
        unsigned int lut_index;
 
        table->backlight_thresholds[0] = 0;
@@ -231,16 +219,13 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
         * format U4.10.
         */
        for (i = 1; i+1 < num_entries; i++) {
-               query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
                lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
                ASSERT(lut_index < params.backlight_lut_array_size);
-               query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
 
                table->backlight_thresholds[i] =
-                               backlight_8_to_16(query_input_8bit);
+                       cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
                table->backlight_offsets[i] =
-                               backlight_8_to_16(query_output_8bit);
+                       cpu_to_be16(params.backlight_lut_array[lut_index]);
        }
 }
 
index 83d960110d23e0a369e8895859d4197442ac47d3..5f3c10ebff080b53b8fd7c7801cb680c2c6214a7 100644 (file)
@@ -137,20 +137,17 @@ struct kgd2kfd_shared_resources {
        /* Bit n == 1 means Queue n is available for KFD */
        DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
 
-       /* Doorbell assignments (SOC15 and later chips only). Only
+       /* SDMA doorbell assignments (SOC15 and later chips only). Only
         * specific doorbells are routed to each SDMA engine. Others
         * are routed to IH and VCN. They are not usable by the CP.
-        *
-        * Any doorbell number D that satisfies the following condition
-        * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
-        *
-        * KFD currently uses 1024 (= 0x3ff) doorbells per process. If
-        * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means
-        * mask would be set to 0x1e0 and val set to 0x0e0.
         */
-       unsigned int sdma_doorbell[2][8];
-       unsigned int reserved_doorbell_mask;
-       unsigned int reserved_doorbell_val;
+       uint32_t *sdma_doorbell_idx;
+
+       /* From SOC15 onward, the range of doorbell indices that is not
+        * usable for CP queues.
+        */
+       uint32_t non_cp_doorbells_start;
+       uint32_t non_cp_doorbells_end;
 
        /* Base address of doorbell aperture. */
        phys_addr_t doorbell_physical_address;
index 5273de3c5b9821422efde2440a1592a8474ebe9d..0ad8fe4a6277ee35406320392f961d52456f81fa 100644 (file)
@@ -139,12 +139,10 @@ static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
 static int smu10_init_dynamic_state_adjustment_rule_settings(
                                                        struct pp_hwmgr *hwmgr)
 {
-       uint32_t table_size =
-               sizeof(struct phm_clock_voltage_dependency_table) +
-               (7 * sizeof(struct phm_clock_voltage_dependency_record));
+       struct phm_clock_voltage_dependency_table *table_clk_vlt;
 
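+       /* struct_size() sizes the header plus a 7-entry flexible array,
+        * with overflow checking.
+        */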
-       struct phm_clock_voltage_dependency_table *table_clk_vlt =
-                                       kzalloc(table_size, GFP_KERNEL);
+       table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+                               GFP_KERNEL);
 
        if (NULL == table_clk_vlt) {
                pr_err("Can not allocate memory!\n");
index c8f5c00dd1e775e40e2d986dcfc167b76e6927df..48187acac59e7bf1181565e852584fe856f26efc 100644 (file)
@@ -3681,10 +3681,12 @@ static int smu7_request_link_speed_change_before_state_change(
                        data->force_pcie_gen = PP_PCIEGen2;
                        if (current_link_speed == PP_PCIEGen2)
                                break;
+                       /* fall through */
                case PP_PCIEGen2:
                        if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
                                break;
 #endif
+                       /* fall through */
                default:
                        data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
                        break;
index d138ddae563d2cfdf6ff9c878a42e971612509fb..58f5589aaf126add63407b058e80e7d4bd2a4319 100644 (file)
@@ -1211,7 +1211,7 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
                                hwmgr->platform_descriptor.TDPAdjustment :
                                (-1 * hwmgr->platform_descriptor.TDPAdjustment);
 
-                if (hwmgr->chip_id > CHIP_TONGA)
+               if (hwmgr->chip_id > CHIP_TONGA)
                        target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
                else
                        target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
index 553a203ac47c4766f7664d46d972ce254ba3fce0..019d6a206492b05604e42678d5d41a87f6c4e74a 100644 (file)
@@ -272,12 +272,10 @@ static int smu8_init_dynamic_state_adjustment_rule_settings(
                        struct pp_hwmgr *hwmgr,
                        ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
 {
-       uint32_t table_size =
-               sizeof(struct phm_clock_voltage_dependency_table) +
-               (7 * sizeof(struct phm_clock_voltage_dependency_record));
+       struct phm_clock_voltage_dependency_table *table_clk_vlt;
 
-       struct phm_clock_voltage_dependency_table *table_clk_vlt =
-                                       kzalloc(table_size, GFP_KERNEL);
+       table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+                               GFP_KERNEL);
 
        if (NULL == table_clk_vlt) {
                pr_err("Can not allocate memory!\n");
index f94dab27f486f6d11713d9508d458c2efe8e05c9..7337be5602e4bea84885e18fa98f66b3e7026874 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 #include "amdgpu.h"
 #include "soc15.h"
 #include "soc15_hw_ip.h"
@@ -114,7 +136,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
                                             ARRAY_SIZE(pre_baco_tbl))) {
                        if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
-                               return -1;
+                               return -EINVAL;
 
                        if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
                                                   ARRAY_SIZE(enter_baco_tbl)))
@@ -132,5 +154,5 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                }
        }
 
-       return -1;
+       return -EINVAL;
 }
index a93b1e6d1c66061704a755a6666a6278cb718a66..f7a3ffa744b32c6c24622cdfcf5fcd436604a316 100644 (file)
@@ -20,8 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef __VEGA10_BOCO_H__
-#define __VEGA10_BOCO_H__
+#ifndef __VEGA10_BACO_H__
+#define __VEGA10_BACO_H__
 #include "hwmgr.h"
 #include "common_baco.h"
 
index 0d883b358df22c738f9b1c13596b0ddd6d3fe541..5e8602a79b1c30d0a63bd97e7485740884e1a875 100644 (file)
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
 #include "amdgpu.h"
 #include "soc15.h"
 #include "soc15_hw_ip.h"
@@ -67,14 +89,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
 
 
                if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
-                       return -1;
+                       return -EINVAL;
 
        } else if (state == BACO_STATE_OUT) {
                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
-                       return -1;
+                       return -EINVAL;
                if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
                                                     ARRAY_SIZE(clean_baco_tbl)))
-                       return -1;
+                       return -EINVAL;
        }
 
        return 0;
index c51988a9ed774b5749d80b88a909cd6af7f1f607..51c7f83929254832e6f6aaa87902e140fd959c0e 100644 (file)
@@ -20,8 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#ifndef __VEGA20_BOCO_H__
-#define __VEGA20_BOCO_H__
+#ifndef __VEGA20_BACO_H__
+#define __VEGA20_BACO_H__
 #include "hwmgr.h"
 #include "common_baco.h"
 
index 0769b1ec562b498fc6d75cc739cdeed1a8888cf5..aad79affb08123a6acb21dcf03d80bbfbf5609d3 100644 (file)
@@ -3456,7 +3456,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
        disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
                            !hwmgr->display_config->multi_monitor_in_sync) ||
                             vblank_too_short;
-    latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+       latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
 
        /* gfxclk */
        dpm_table = &(data->dpm_table.gfx_table);
index a6edd5df33b0fa0cf9b4b3ed8dd694ba9898b14b..4240aeec9000e9f0e9677d741dfa4d0f180fa196 100644 (file)
 #include <drm/amdgpu_drm.h>
 #include "smumgr.h"
 
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
 MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
index cb55bdc36f3f01c612a3a187748520465b405675..6b6e037258c301031ae70b7dc962ae32180bfa11 100644 (file)
@@ -145,6 +145,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
        if (IS_ERR(dev))
                return PTR_ERR(dev);
 
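+       /* Enable the PCI device before its BARs are touched */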
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_free_dev;
+
        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);
 
index c53ecbd9abddd476a1f2acf12c8c462a983dd4ac..540a77a2ade9d80bb350185286c8e795d9c99a64 100644 (file)
@@ -1608,6 +1608,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
            old_plane_state->crtc != new_plane_state->crtc)
                return -EINVAL;
 
+       /*
+        * FIXME: Since prepare_fb and cleanup_fb are always called on
+        * the new_plane_state for async updates, we need to block framebuffer
+        * changes. This prevents use of a fb that's been cleaned up and
+        * double cleanups from occurring.
+        */
+       if (old_plane_state->fb != new_plane_state->fb)
+               return -EINVAL;
+
        funcs = plane->helper_private;
        if (!funcs->atomic_async_update)
                return -EINVAL;
index b1838a41ad4359782432df8bbc7e71425fc74ee8..83a5bbca6e7e089f10d75ea723ac982b7df61356 100644 (file)
@@ -262,6 +262,18 @@ void drm_file_free(struct drm_file *file)
        kfree(file);
 }
 
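+/* Common teardown shared by drm_open()'s error path and drm_release():
+ * unlink the drm_file from the device's file list and free it.
+ */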
+static void drm_close_helper(struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+       struct drm_device *dev = file_priv->minor->dev;
+
+       mutex_lock(&dev->filelist_mutex);
+       list_del(&file_priv->lhead);
+       mutex_unlock(&dev->filelist_mutex);
+
+       drm_file_free(file_priv);
+}
+
 static int drm_setup(struct drm_device * dev)
 {
        int ret;
@@ -318,8 +330,10 @@ int drm_open(struct inode *inode, struct file *filp)
                goto err_undo;
        if (need_setup) {
                retcode = drm_setup(dev);
-               if (retcode)
+               if (retcode) {
+                       drm_close_helper(filp);
                        goto err_undo;
+               }
        }
        return 0;
 
@@ -473,11 +487,7 @@ int drm_release(struct inode *inode, struct file *filp)
 
        DRM_DEBUG("open_count = %d\n", dev->open_count);
 
-       mutex_lock(&dev->filelist_mutex);
-       list_del(&file_priv->lhead);
-       mutex_unlock(&dev->filelist_mutex);
-
-       drm_file_free(file_priv);
+       drm_close_helper(filp);
 
        if (!--dev->open_count) {
                drm_lastclose(dev);
index 7e6746b2d704c588db58f950461d293d4a1fd018..687943df58e1b19df1cd0eb25c8abe96e79deeb3 100644 (file)
@@ -508,6 +508,13 @@ int drm_version(struct drm_device *dev, void *data,
        return err;
 }
 
+static inline bool
+drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
+{
+       return drm_core_check_feature(dev, DRIVER_RENDER) &&
+               (flags & DRM_RENDER_ALLOW);
+}
+
 /**
  * drm_ioctl_permit - Check ioctl permissions against caller
  *
@@ -522,14 +529,19 @@ int drm_version(struct drm_device *dev, void *data,
  */
 int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
 {
+       const struct drm_device *dev = file_priv->minor->dev;
+
        /* ROOT_ONLY is only for CAP_SYS_ADMIN */
        if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
                return -EACCES;
 
-       /* AUTH is only for authenticated or render client */
-       if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
-                    !file_priv->authenticated))
-               return -EACCES;
+       /* AUTH is only for master ... */
+       if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
+               /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
+               if (!file_priv->authenticated &&
+                   !drm_render_driver_and_ioctl(dev, flags))
+                       return -EACCES;
+       }
 
        /* MASTER is only for master or control clients */
        if (unlikely((flags & DRM_MASTER) &&
@@ -570,7 +582,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
 
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
index c9e439c82241c58569572614314cbbe484ead665..c3c84a09e628add39abe1efb243a9f0fc9c113f3 100644 (file)
@@ -4,7 +4,7 @@ config DRM_IMX
        select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
-       depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+       depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
        depends on IMX_IPUV3_CORE
        help
          enable i.MX graphics support
@@ -18,6 +18,7 @@ config DRM_IMX_PARALLEL_DISPLAY
 config DRM_IMX_TVE
        tristate "Support for TV and VGA displays"
        depends on DRM_IMX
+       depends on COMMON_CLK
        select REGMAP_MMIO
        help
          Choose this to enable the internal Television Encoder (TVe)
index 44da0f5d0ed9042ab40f7adac59240caaa5d4e11..c935cbe059a79c323306fb2d70f6684c972d3fe0 100644 (file)
@@ -49,11 +49,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
 {
        int ret;
 
-       ret = drm_atomic_helper_check_modeset(dev, state);
-       if (ret)
-               return ret;
-
-       ret = drm_atomic_helper_check_planes(dev, state);
+       ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;
 
@@ -229,6 +225,7 @@ static int imx_drm_bind(struct device *dev)
        drm->mode_config.funcs = &imx_drm_mode_config_funcs;
        drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
        drm->mode_config.allow_fb_modifiers = true;
+       drm->mode_config.normalize_zpos = true;
 
        drm_mode_config_init(drm);
 
index 3c62167a92510a1d205b180ada730361c4964b25..ec3602ebbc1cd1e87da13c9c909078927ccf2287 100644 (file)
@@ -34,6 +34,7 @@ struct ipu_crtc {
        struct ipu_dc           *dc;
        struct ipu_di           *di;
        int                     irq;
+       struct drm_pending_vblank_event *event;
 };
 
 static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
@@ -173,8 +174,31 @@ static const struct drm_crtc_funcs ipu_crtc_funcs = {
 static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
 {
        struct ipu_crtc *ipu_crtc = dev_id;
+       struct drm_crtc *crtc = &ipu_crtc->base;
+       unsigned long flags;
+       int i;
+
+       drm_crtc_handle_vblank(crtc);
+
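+       /* Hold the pending page-flip event until no plane on this CRTC
+        * still has an atomic update in flight.
+        */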
+       if (ipu_crtc->event) {
+               for (i = 0; i < ARRAY_SIZE(ipu_crtc->plane); i++) {
+                       struct ipu_plane *plane = ipu_crtc->plane[i];
 
-       drm_crtc_handle_vblank(&ipu_crtc->base);
+                       if (!plane)
+                               continue;
+
+                       if (ipu_plane_atomic_update_pending(&plane->base))
+                               break;
+               }
+
+               if (i == ARRAY_SIZE(ipu_crtc->plane)) {
+                       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+                       drm_crtc_send_vblank_event(crtc, ipu_crtc->event);
+                       ipu_crtc->event = NULL;
+                       drm_crtc_vblank_put(crtc);
+                       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               }
+       }
 
        return IRQ_HANDLED;
 }
@@ -223,8 +247,10 @@ static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
 {
        spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
+               struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
                WARN_ON(drm_crtc_vblank_get(crtc));
-               drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+               ipu_crtc->event = crtc->state->event;
                crtc->state->event = NULL;
        }
        spin_unlock_irq(&crtc->dev->event_lock);
index 21e964f6ab5cf58559e059d7d34c4bb26359c042..d7a727a6e3d72c15aa37539bcbda1b051b944d86 100644 (file)
@@ -273,6 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
 
 static void ipu_plane_state_reset(struct drm_plane *plane)
 {
+       unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
        struct ipu_plane_state *ipu_state;
 
        if (plane->state) {
@@ -284,8 +285,11 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
 
        ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
 
-       if (ipu_state)
+       if (ipu_state) {
                __drm_atomic_helper_plane_reset(plane, &ipu_state->base);
+               ipu_state->base.zpos = zpos;
+               ipu_state->base.normalized_zpos = zpos;
+       }
 }
 
 static struct drm_plane_state *
@@ -560,6 +564,25 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
                ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1);
 
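+       /* Configure DP global alpha from the normalized zpos: the top plane
+        * uses per-pixel alpha when its format has it and is made opaque via
+        * global alpha otherwise.
+        */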
+       switch (ipu_plane->dp_flow) {
+       case IPU_DP_FLOW_SYNC_BG:
+               if (state->normalized_zpos == 1) {
+                       ipu_dp_set_global_alpha(ipu_plane->dp,
+                                               !fb->format->has_alpha, 0xff,
+                                               true);
+               } else {
+                       ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+               }
+               break;
+       case IPU_DP_FLOW_SYNC_FG:
+               if (state->normalized_zpos == 1) {
+                       ipu_dp_set_global_alpha(ipu_plane->dp,
+                                               !fb->format->has_alpha, 0xff,
+                                               false);
+               }
+               break;
+       }
+
        eba = drm_plane_state_to_eba(state, 0);
 
        /*
@@ -582,6 +605,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
                ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+               ipu_plane->next_buf = !active;
                if (ipu_plane_separate_alpha(ipu_plane)) {
                        active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
                        ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
@@ -595,34 +619,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        switch (ipu_plane->dp_flow) {
        case IPU_DP_FLOW_SYNC_BG:
                ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
-               ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
                break;
        case IPU_DP_FLOW_SYNC_FG:
                ipu_dp_setup_channel(ipu_plane->dp, ics,
                                        IPUV3_COLORSPACE_UNKNOWN);
-               /* Enable local alpha on partial plane */
-               switch (fb->format->format) {
-               case DRM_FORMAT_ARGB1555:
-               case DRM_FORMAT_ABGR1555:
-               case DRM_FORMAT_RGBA5551:
-               case DRM_FORMAT_BGRA5551:
-               case DRM_FORMAT_ARGB4444:
-               case DRM_FORMAT_ARGB8888:
-               case DRM_FORMAT_ABGR8888:
-               case DRM_FORMAT_RGBA8888:
-               case DRM_FORMAT_BGRA8888:
-               case DRM_FORMAT_RGB565_A8:
-               case DRM_FORMAT_BGR565_A8:
-               case DRM_FORMAT_RGB888_A8:
-               case DRM_FORMAT_BGR888_A8:
-               case DRM_FORMAT_RGBX8888_A8:
-               case DRM_FORMAT_BGRX8888_A8:
-                       ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
-                       break;
-               default:
-                       ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
-                       break;
-               }
+               break;
        }
 
        ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
@@ -709,6 +710,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
        ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
        ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
        ipu_plane_enable(ipu_plane);
+       ipu_plane->next_buf = -1;
 }
 
 static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
@@ -718,6 +720,24 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
        .atomic_update = ipu_plane_atomic_update,
 };
 
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
+{
+       struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+       struct drm_plane_state *state = plane->state;
+       struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+       /* disabled crtcs must not block the update */
+       if (!state->crtc)
+               return false;
+
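+       /* With a PRE/PRG in the pipeline the PRG reports whether the update
+        * has latched; otherwise compare the queued IDMAC buffer against the
+        * one currently being scanned out.
+        */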
+       if (ipu_state->use_pre)
+               return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
+       else if (ipu_plane->next_buf >= 0)
+               return ipu_idmac_get_current_buffer(ipu_plane->ipu_ch) !=
+                      ipu_plane->next_buf;
+
+       return false;
+}
 int ipu_planes_assign_pre(struct drm_device *dev,
                          struct drm_atomic_state *state)
 {
@@ -806,6 +826,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 {
        struct ipu_plane *ipu_plane;
        const uint64_t *modifiers = ipu_format_modifiers;
+       unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
        int ret;
 
        DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
@@ -836,5 +857,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 
        drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
 
+       if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
+               drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+       else
+               drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+
        return ipu_plane;
 }
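
A note on the helper added above: ipu_plane_atomic_update_pending() reduces to a "has the hardware latched the armed buffer yet" test, either via the PRG shadow-update flag or by comparing the IDMAC's current buffer against next_buf. A rough standalone sketch of that double-buffering check (hypothetical names, not the IPUv3 API):

    #include <stdbool.h>

    /* Sketch only: next_buf mirrors ipu_plane->next_buf, with -1 meaning
     * no flip is armed; hw_current stands in for the IDMAC current-buffer
     * readback. */
    struct plane_sketch {
            int next_buf;
    };

    static bool update_pending(const struct plane_sketch *p, int hw_current)
    {
            if (p->next_buf < 0)
                    return false;               /* nothing armed */
            return hw_current != p->next_buf;   /* still scanning old buffer */
    }
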
index e563ea17a827c81e4be29b346f9f23e2fbc76853..15e85e15d35c50205d3926880fce393910be2f94 100644 (file)
@@ -27,6 +27,7 @@ struct ipu_plane {
        int                     dp_flow;
 
        bool                    disabling;
+       int                     next_buf;
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,5 +49,6 @@ int ipu_plane_irq(struct ipu_plane *plane);
 
 void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
 void ipu_plane_disable_deferred(struct drm_plane *plane);
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane);
 
 #endif
index a97294ac96d5914b2cfdd561649a0ccdf56a9fb6..a12439266bb0716e049c25a8e518a265fe84f74e 100644 (file)
@@ -4869,10 +4869,12 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
                        pi->force_pcie_gen = RADEON_PCIE_GEN2;
                        if (current_link_speed == RADEON_PCIE_GEN2)
                                break;
+                       /* fall through */
                case RADEON_PCIE_GEN2:
                        if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                                break;
 #endif
+                       /* fall through */
                default:
                        pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
                        break;
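
The /* fall through */ comments added here and in si_dpm.c below mark the absent break statements as deliberate, which is the form GCC's -Wimplicit-fallthrough recognizes. A minimal illustration of the pattern, independent of the radeon code:

    /* Cascade from the preferred option down to weaker ones; each level
     * intentionally falls through when its requirement is not met. */
    static int pick_gen(int wanted, int gen3_ok, int gen2_ok)
    {
            switch (wanted) {
            case 3:
                    if (gen3_ok)
                            return 3;
                    /* fall through */
            case 2:
                    if (gen2_ok)
                            return 2;
                    /* fall through */
            default:
                    return 1;
            }
    }
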
index f471537c852fd1bd00e7aeb8d2ba30fe1685aa82..1e14c6921454932471760fab5283ebc332792e4f 100644 (file)
@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        return -EINVAL;
                }
                ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+               break;
        case CB_TARGET_MASK:
                track->cb_target_mask = radeon_get_ib_value(p, idx);
                track->cb_dirty = true;
index dec1e081f52958e9f1d5cfe7813c95ef021ca320..6a8fb6fd183c3fc80a6b557fba5e4751307f9b53 100644 (file)
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
        }
 
        if (radeon_is_px(dev)) {
+               dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
index 0a785ef0ab660f41c2a204fb766068c93dcfa044..c9f6cb77e85780ac1cbc881155cf748d93588f1a 100644 (file)
@@ -5762,10 +5762,12 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic
                        si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
                        if (current_link_speed == RADEON_PCIE_GEN2)
                                break;
+                       /* fall through */
                case RADEON_PCIE_GEN2:
                        if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                                break;
 #endif
+                       /* fall through */
                default:
                        si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
                        break;
index e2942c9a11a7aeccb6710cdb7890cc56e1558d73..35ddbec1375ae880bba180a4d7f46173b38e2ec2 100644 (file)
@@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 {
        int i;
 
-       if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
+       if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
                return -EINVAL;
 
        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
-       entity->rq = rq_list[0];
+       entity->rq = NULL;
        entity->guilty = guilty;
        entity->num_rq_list = num_rq_list;
        entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
@@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 
        for (i = 0; i < num_rq_list; ++i)
                entity->rq_list[i] = rq_list[i];
+
+       if (num_rq_list)
+               entity->rq = rq_list[0];
+
        entity->last_scheduled = NULL;
 
        spin_lock_init(&entity->rq_lock);
@@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
        struct task_struct *last_user;
        long ret = timeout;
 
+       if (!entity->rq)
+               return 0;
+
        sched = entity->rq->sched;
        /**
         * The client will not queue more IBs during this fini, consume existing
@@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
  */
 void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched;
+       struct drm_gpu_scheduler *sched = NULL;
 
-       sched = entity->rq->sched;
-       drm_sched_rq_remove_entity(entity->rq, entity);
+       if (entity->rq) {
+               sched = entity->rq->sched;
+               drm_sched_rq_remove_entity(entity->rq, entity);
+       }
 
        /* Consumption of existing IBs wasn't completed. Forcefully
         * remove them here.
         */
        if (spsc_queue_peek(&entity->job_queue)) {
-               /* Park the kernel thread for a moment to make sure it isn't
-                * processing our entity.
-                */
-               kthread_park(sched->thread);
-               kthread_unpark(sched->thread);
+               if (sched) {
+                       /* Park the kernel thread for a moment to make sure it
+                        * isn't processing our entity.
+                        */
+                       kthread_park(sched->thread);
+                       kthread_unpark(sched->thread);
+               }
                if (entity->dependency) {
                        dma_fence_remove_callback(entity->dependency,
                                                  &entity->cb);
@@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
        for (i = 0; i < entity->num_rq_list; ++i)
                drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
 
-       drm_sched_rq_remove_entity(entity->rq, entity);
-       drm_sched_entity_set_rq_priority(&entity->rq, priority);
-       drm_sched_rq_add_entity(entity->rq, entity);
+       if (entity->rq) {
+               drm_sched_rq_remove_entity(entity->rq, entity);
+               drm_sched_entity_set_rq_priority(&entity->rq, priority);
+               drm_sched_rq_add_entity(entity->rq, entity);
+       }
 
        spin_unlock(&entity->rq_lock);
 }
index 4a28f3fbb0a28c55630be20edbb8c2df4de74645..6cacfd61d984207084f04f579cf2fda38811a3ce 100644 (file)
@@ -265,6 +265,12 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
        writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
 }
 
+bool ipu_pre_update_pending(struct ipu_pre *pre)
+{
+       return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
+                 IPU_PRE_CTRL_SDW_UPDATE);
+}
+
 u32 ipu_pre_get_baddr(struct ipu_pre *pre)
 {
        return (u32)pre->buffer_paddr;
index 38a3a9764e49a1c3b412920e52cfe330469b47ad..94b76badf677e78e799a0698d7c0c608e74d0ecc 100644 (file)
@@ -347,6 +347,22 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
 }
 EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
 
+bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
+{
+       int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+       struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+       struct ipu_prg_channel *chan;
+
+       if (prg_chan < 0)
+               return false;
+
+       chan = &prg->chan[prg_chan];
+       WARN_ON(!chan->enabled);
+
+       return ipu_pre_update_pending(prg->pres[chan->used_pre]);
+}
+EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
+
 static int ipu_prg_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
index d6beee99b6b8c14a8bc31062656a62c2dd86354b..38622e835e95d4eaabd48dd501725237ba28e39a 100644 (file)
@@ -272,6 +272,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
                       unsigned int height, unsigned int stride, u32 format,
                       uint64_t modifier, unsigned int bufaddr);
 void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
+bool ipu_pre_update_pending(struct ipu_pre *pre);
 
 struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
                                          int ipu_id);
index c13c0ba30f63e8f62694f896b8251bea9db5498a..d499cd61c0e89f659f555501b27484b3e2bc6df4 100644 (file)
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
 static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 {
        int err;
+       unsigned int factor;
 
        c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                return -EINVAL;
        }
 
-       rdev->qpmask = rdev->lldi.udb_density - 1;
-       rdev->cqmask = rdev->lldi.ucq_density - 1;
+       /* This implementation requires an sge_host_page_size <= PAGE_SIZE. */
+       if (rdev->lldi.sge_host_page_size > PAGE_SIZE) {
+               pr_err("%s: unsupported sge host page size %u\n",
+                      pci_name(rdev->lldi.pdev),
+                      rdev->lldi.sge_host_page_size);
+               return -EINVAL;
+       }
+
+       factor = PAGE_SIZE / rdev->lldi.sge_host_page_size;
+       rdev->qpmask = (rdev->lldi.udb_density * factor) - 1;
+       rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1;
+
        pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
                 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
                 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
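
The reworked qpmask/cqmask math assumes both the densities and the PAGE_SIZE / sge_host_page_size factor are powers of two, so scaling the density before subtracting one still produces a contiguous low-bit mask. Just that arithmetic, as a hedged sketch with illustrative names:

    #include <assert.h>

    /* For power-of-two n, n - 1 is an all-ones mask of the low bits.
     * E.g. a 64K PAGE_SIZE over a 4K SGE host page gives factor 16, so a
     * udb_density of 8 becomes the mask 8 * 16 - 1 = 0x7f. */
    static unsigned int scaled_mask(unsigned int density,
                                    unsigned int page_size,
                                    unsigned int sge_host_page_size)
    {
            unsigned int factor = page_size / sge_host_page_size;

            assert(factor && !(factor & (factor - 1)));     /* power of two */
            return density * factor - 1;
    }
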
index 31d91538bbf459b76737205985507efb27b2ae65..694324b374803e8e36f6a2c7f31c99ae33c954df 100644 (file)
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
        if (status)
                return FAILED;
 
-       for (i = 0; i < target->ch_count; i++) {
-               ch = &target->ch[i];
-               for (j = 0; j < target->req_ring_size; ++j) {
-                       struct srp_request *req = &ch->req_ring[j];
-
-                       srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
-               }
-       }
-
        return SUCCESS;
 }
 
index dc9f14811e0f4d9ed3a12936501274ad55789a55..58dc70bffd5b87ec682984e3a9e8842b5dcfb68d 100644 (file)
@@ -144,7 +144,7 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;
 
-       size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
+       size = sizeof(*info) + level * sizeof(info->path[0]);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
index d713271ebf7c73f161456195e9f3ffc68db8c495..a64116586b4cccbfbe6b8e26c38209eb7ac5cd10 100644 (file)
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan)
 
        /* Clear ring flush state */
        timeout = 1000; /* timeout of 1s */
-       writel_relaxed(0x0, ring + RING_CONTROL);
+       writel_relaxed(0x0, ring->regs + RING_CONTROL);
        do {
-               if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
+               if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
                      FLUSH_DONE_MASK))
                        break;
                mdelay(1);
index c6a7d4582dc6790be3a50f545faa36bc07ea8a7f..38d9df3fb199fed88a4825b7b392e20c27e85c2a 100644 (file)
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mbox_flush);
 
 /**
  * mbox_request_channel - Request a mailbox channel.
index 14f3fdb8c6bb7aad30d3b9f1337afcb1f6e34f7e..9ce8eb51a60fd1d377107fc8264ca16e8b32e493 100644 (file)
@@ -2380,12 +2380,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%u%s", card->host->index, subname ? subname : "");
 
-       if (mmc_card_mmc(card))
-               blk_queue_logical_block_size(md->queue.queue,
-                                            card->ext_csd.data_sector_size);
-       else
-               blk_queue_logical_block_size(md->queue.queue, 512);
-
        set_capacity(md->disk, size);
 
        if (mmc_host_cmd23(card->host)) {
index 5bd58b95d318ea2b86b4a784244bbbf65a3dfca2..b27a1e6202331f3692db7db58f20e1f5bed04f29 100644 (file)
@@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
        if (!data)
                return;
 
-       if (cmd->error || data->error ||
+       if ((cmd && cmd->error) || data->error ||
            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
                return;
 
index 35cc138b096d95bf37ec186fa13c8ebe0a84dc7c..15a45ec6518d75c3fd8e602c313ebe77d3aa7376 100644 (file)
@@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
+       unsigned block_size = 512;
 
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
        blk_queue_max_hw_sectors(mq->queue,
                min(host->max_blk_count, host->max_req_size / 512));
        blk_queue_max_segments(mq->queue, host->max_segs);
-       blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+       if (mmc_card_mmc(card))
+               block_size = card->ext_csd.data_sector_size;
+
+       blk_queue_logical_block_size(mq->queue, block_size);
+       blk_queue_max_segment_size(mq->queue,
+                       round_down(host->max_seg_size, block_size));
 
        INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
        INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
index 159270e947cf62965a932edf9eb1d3a478ac89db..a8af682a9182160e1e9074f1714dd33f5ee39cdd 100644 (file)
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
 
        cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
-               (cq_host->num_slots - 1);
+               cq_host->mmc->cqe_qdepth;
 
        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
+       if (!cq_host->desc_base)
+               return -ENOMEM;
+
        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                              cq_host->data_size,
                                              &cq_host->trans_desc_dma_base,
                                              GFP_KERNEL);
-       if (!cq_host->desc_base || !cq_host->trans_desc_base)
+       if (!cq_host->trans_desc_base) {
+               dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+                                  cq_host->desc_base,
+                                  cq_host->desc_dma_base);
+               cq_host->desc_base = NULL;
+               cq_host->desc_dma_base = 0;
                return -ENOMEM;
+       }
 
        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
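
The cqhci change splits one combined error check into two so that, when only the second allocation fails, the first buffer is released and its pointers cleared before returning. The same shape with plain malloc() standing in for dmam_alloc_coherent() (a sketch, not the driver code):

    #include <stdlib.h>

    static int alloc_pair(void **desc, size_t desc_sz,
                          void **trans, size_t trans_sz)
    {
            *desc = malloc(desc_sz);
            if (!*desc)
                    return -1;

            *trans = malloc(trans_sz);
            if (!*trans) {
                    /* Undo the first allocation and clear the stale
                     * pointer so later teardown cannot double-free. */
                    free(*desc);
                    *desc = NULL;
                    return -1;
            }
            return 0;
    }
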
index 10ba46b728e82279d66aea48792388145383bbee..8ade14fb2148dbcf8a8fa8087fdbb8dfbd1842ca 100644 (file)
@@ -1450,6 +1450,7 @@ static int mmc_spi_probe(struct spi_device *spi)
                mmc->caps &= ~MMC_CAP_NEEDS_POLL;
                mmc_gpiod_request_cd_irq(mmc);
        }
+       mmc_detect_change(mmc, 0);
 
        /* Index 1 is write protect/read only */
        status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL);
index 8471160316e073c5fee142b3393ea57c9d9f4e0a..02cd878e209f4bd009b1d40b801a4b3433efdcde 100644 (file)
@@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
        .scc_offset     = 0x0300,
        .taps           = rcar_gen2_scc_taps,
        .taps_num       = ARRAY_SIZE(rcar_gen2_scc_taps),
+       .max_blk_count  = 0xffffffff,
 };
 
 /* Definitions for sampling clocks */
index d0d319398a547827a5a7a5053f3f12f802402f61..00d41b312c79af466a63241311f17103a98e781d 100644 (file)
@@ -1095,11 +1095,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
                writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
                        | ESDHC_BURST_LEN_EN_INCR,
                        host->ioaddr + SDHCI_HOST_CONTROL);
+
                /*
-               * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
-               * TO1.1, it's harmless for MX6SL
-               */
-               writel(readl(host->ioaddr + 0x6c) | BIT(7),
+                * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+                * TO1.1, it's harmless for MX6SL
+                */
+               writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
                        host->ioaddr + 0x6c);
 
                /* disable DLL_CTRL delay line settings */
index c03529e3f01a13905dd6fbb9bbbfcb35c31de93c..2adb0d24360fbb1bd2e81e9d67e261a960249d19 100644 (file)
@@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
        iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
 }
 
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+       iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
 static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
                                       const u32 *buf, int count)
 {
index 085a0fab769c02a64f6570a73bdc2363b9fac99a..f7a6f005899a351ace1b5b3ecf8c8dca702e157e 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
+#include <linux/sizes.h>
 #include <linux/spinlock.h>
 #include <linux/swiotlb.h>
 #include <linux/workqueue.h>
@@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
        return false;
 }
 
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 {
        struct mmc_host *mmc = host->mmc;
        struct tmio_mmc_data *pdata = host->pdata;
@@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
        unsigned int sdio_status;
 
        if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
-               return;
+               return false;
 
        status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
        ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
 
        if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
                mmc_signal_sdio_irq(mmc);
+
+       return ireg;
 }
 
 irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
        if (__tmio_mmc_sdcard_irq(host, ireg, status))
                return IRQ_HANDLED;
 
-       __tmio_mmc_sdio_irq(host);
+       if (__tmio_mmc_sdio_irq(host))
+               return IRQ_HANDLED;
 
-       return IRQ_HANDLED;
+       return IRQ_NONE;
 }
 EXPORT_SYMBOL_GPL(tmio_mmc_irq);
 
@@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 
        /* Set transfer length / blocksize */
        sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
-       sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+       if (host->mmc->max_blk_count >= SZ_64K)
+               sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+       else
+               sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
        tmio_mmc_start_dma(host, data);
 
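
tmio_mmc_start_data() now chooses the register width for the block count from the host's advertised max_blk_count: only controllers that can address 64K or more blocks get the new 32-bit access. The selection pattern, with hypothetical shadow-register helpers in place of real MMIO:

    #include <stdint.h>

    #define SZ_64K 0x10000u

    static uint32_t blk_count_reg;          /* stand-in for the MMIO register */

    static void write16(uint16_t v) { blk_count_reg = v; }
    static void write32(uint32_t v) { blk_count_reg = v; }

    static void set_blk_count(uint32_t max_blk_count, uint32_t blocks)
    {
            if (max_blk_count >= SZ_64K)
                    write32(blocks);                /* wide register */
            else
                    write16((uint16_t)blocks);      /* legacy 16-bit field */
    }
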
index 22f753e555ac50b9922a282dd42c41fc99ee1128..83f88b8b5d9f04d02cfed1361493612d8f9d5af2 100644 (file)
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev,
         * Going to have to check what details I need to set and how to
         * get them
         */
-       mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node);
+       mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
        mtd->type = MTD_NORFLASH;
        mtd->flags = MTD_WRITEABLE;
        mtd->size = size;
index 999b705769a847754b7a8e4df63f3b34c82bb4b7..3ef01baef9b62d1085de674b43f6310fbdc62821 100644 (file)
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
 {
        struct nvmem_config config = {};
 
+       config.id = -1;
        config.dev = &mtd->dev;
        config.name = mtd->name;
        config.owner = THIS_MODULE;
index 485462d3087fcadcdbdb6fefc4b0846dbbb56838..537c90c8eb0acf953ae1928806fbf0eb540cb47c 100644 (file)
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       /* Link-local multicast packets should be passed to the
-        * stack on the link they arrive as well as pass them to the
-        * bond-master device. These packets are mostly usable when
-        * stack receives it with the link on which they arrive
-        * (e.g. LLDP) they also must be available on master. Some of
-        * the use cases include (but are not limited to): LLDP agents
-        * that must be able to operate both on enslaved interfaces as
-        * well as on bonds themselves; linux bridges that must be able
-        * to process/pass BPDUs from attached bonds when any kind of
-        * STP version is enabled on the network.
+       /*
+        * For packets that the bond_should_deliver_exact_match() call
+        * determines should be suppressed, we want to make an exception for
+        * link-local packets. This is necessary for e.g. LLDP daemons to be
+        * able to monitor inactive slave links without being forced to bind
+        * to them explicitly.
+        *
+        * At the same time, packets that are passed to the bonding master
+        * (including link-local ones) can have their originating interface
+        * determined via the PACKET_ORIGDEV socket option.
         */
-       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
-               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
-               if (nskb) {
-                       nskb->dev = bond->dev;
-                       nskb->queue_mapping = 0;
-                       netif_rx(nskb);
-               }
-               return RX_HANDLER_PASS;
-       }
-       if (bond_should_deliver_exact_match(skb, slave, bond))
+       if (bond_should_deliver_exact_match(skb, slave, bond)) {
+               if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+                       return RX_HANDLER_PASS;
                return RX_HANDLER_EXACT;
+       }
 
        skb->dev = bond->dev;
 
index 0e4bbdcc614f073c7ec7dbcbb3a83291dbeadbf0..c76892ac4e699cf083c74330c398e36b91b86a8c 100644 (file)
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
 }
 
-static void b53_enable_vlan(struct b53_device *dev, bool enable)
+static void b53_enable_vlan(struct b53_device *dev, bool enable,
+                           bool enable_filtering)
 {
        u8 mgmt, vc0, vc1, vc4 = 0, vc5;
 
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
                vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
                vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
                vc4 &= ~VC4_ING_VID_CHECK_MASK;
-               vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
-               vc5 |= VC5_DROP_VTABLE_MISS;
+               if (enable_filtering) {
+                       vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+                       vc5 |= VC5_DROP_VTABLE_MISS;
+               } else {
+                       vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
+                       vc5 &= ~VC5_DROP_VTABLE_MISS;
+               }
 
                if (is5325(dev))
                        vc0 &= ~VC0_RESERVED_1;
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
        }
 
        b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
+
+       dev->vlan_enabled = enable;
+       dev->vlan_filtering_enabled = enable_filtering;
 }
 
 static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev)
        b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
 }
 
+static u16 b53_default_pvid(struct b53_device *dev)
+{
+       if (is5325(dev) || is5365(dev))
+               return 1;
+       else
+               return 0;
+}
+
 int b53_configure_vlan(struct dsa_switch *ds)
 {
        struct b53_device *dev = ds->priv;
        struct b53_vlan vl = { 0 };
-       int i;
+       int i, def_vid;
+
+       def_vid = b53_default_pvid(dev);
 
        /* clear all vlan entries */
        if (is5325(dev) || is5365(dev)) {
-               for (i = 1; i < dev->num_vlans; i++)
+               for (i = def_vid; i < dev->num_vlans; i++)
                        b53_set_vlan_entry(dev, i, &vl);
        } else {
                b53_do_vlan_op(dev, VTA_CMD_CLEAR);
        }
 
-       b53_enable_vlan(dev, false);
+       b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
 
        b53_for_each_port(dev, i)
                b53_write16(dev, B53_VLAN_PAGE,
-                           B53_VLAN_PORT_DEF_TAG(i), 1);
+                           B53_VLAN_PORT_DEF_TAG(i), def_vid);
 
        if (!is5325(dev) && !is5365(dev))
                b53_set_jumbo(dev, dev->enable_jumbo, false);
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
 
 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 {
+       struct b53_device *dev = ds->priv;
+       struct net_device *bridge_dev;
+       unsigned int i;
+       u16 pvid, new_pvid;
+
+       /* Handle the case where multiple bridges span the same switch device
+        * and one of them has a different setting than what is being
+        * requested, which would break filtering semantics for any of the
+        * other bridge devices.
+        */
+       b53_for_each_port(dev, i) {
+               bridge_dev = dsa_to_port(ds, i)->bridge_dev;
+               if (bridge_dev &&
+                   bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
+                   br_vlan_enabled(bridge_dev) != vlan_filtering) {
+                       netdev_err(bridge_dev,
+                                  "VLAN filtering is global to the switch!\n");
+                       return -EINVAL;
+               }
+       }
+
+       b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+       new_pvid = pvid;
+       if (dev->vlan_filtering_enabled && !vlan_filtering) {
+               /* Filtering is currently enabled, use the default PVID since
+                * the bridge does not expect tagging anymore
+                */
+               dev->ports[port].pvid = pvid;
+               new_pvid = b53_default_pvid(dev);
+       } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+               /* Filtering is currently disabled, restore the previous PVID */
+               new_pvid = dev->ports[port].pvid;
+       }
+
+       if (pvid != new_pvid)
+               b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+                           new_pvid);
+
+       b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
+
        return 0;
 }
 EXPORT_SYMBOL(b53_vlan_filtering);
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
        if (vlan->vid_end > dev->num_vlans)
                return -ERANGE;
 
-       b53_enable_vlan(dev, true);
+       b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
 
        return 0;
 }
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_fast_age_vlan(dev, vid);
        }
 
-       if (pvid) {
+       if (pvid && !dsa_is_cpu_port(ds, port)) {
                b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
                            vlan->vid_end);
                b53_fast_age_vlan(dev, vid);
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
 
                vl->members &= ~BIT(port);
 
-               if (pvid == vid) {
-                       if (is5325(dev) || is5365(dev))
-                               pvid = 1;
-                       else
-                               pvid = 0;
-               }
+               if (pvid == vid)
+                       pvid = b53_default_pvid(dev);
 
                if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag &= ~(BIT(port));
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
        b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
        dev->ports[port].vlan_ctl_mask = pvlan;
 
-       if (is5325(dev) || is5365(dev))
-               pvid = 1;
-       else
-               pvid = 0;
+       pvid = b53_default_pvid(dev);
 
        /* Make this port join all VLANs without VLAN entries */
        if (is58xx(dev)) {
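
The filtering check added to b53_vlan_filtering() guards a switch-global setting: before flipping it for one port, every other bridged port on the device is scanned for a conflicting requirement. Reduced to its essentials (hypothetical types, mirroring the loop above):

    #include <stdbool.h>
    #include <stddef.h>

    struct port_sketch {
            const void *bridge;     /* NULL when the port is not bridged */
            bool vlan_filtering;
    };

    static bool setting_conflicts(const struct port_sketch *ports, size_t n,
                                  const void *my_bridge, bool want)
    {
            for (size_t i = 0; i < n; i++)
                    if (ports[i].bridge && ports[i].bridge != my_bridge &&
                        ports[i].vlan_filtering != want)
                            return true;    /* another bridge disagrees */
            return false;
    }
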
index ec796482792d117a962510fe65c911d5144df7a0..4dc7ee38b2580fe8fee4e22e31f505559cd53dd8 100644 (file)
@@ -91,6 +91,7 @@ enum {
 struct b53_port {
        u16             vlan_ctl_mask;
        struct ethtool_eee eee;
+       u16             pvid;
 };
 
 struct b53_vlan {
@@ -137,6 +138,8 @@ struct b53_device {
 
        unsigned int num_vlans;
        struct b53_vlan *vlans;
+       bool vlan_enabled;
+       bool vlan_filtering_enabled;
        unsigned int num_ports;
        struct b53_port *ports;
 };
index 17ec32b0a1cc0522ca7e250c859750cbf2d2c26a..14138d423cf1698b963cd84307259d7188e368ec 100644 (file)
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
 {
        struct net_device *p = ds->ports[port].cpu_dp->master;
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       struct ethtool_wolinfo pwol;
+       struct ethtool_wolinfo pwol = { };
 
        /* Get the parent device WoL settings */
-       p->ethtool_ops->get_wol(p, &pwol);
+       if (p->ethtool_ops->get_wol)
+               p->ethtool_ops->get_wol(p, &pwol);
 
        /* Advertise the parent device supported settings */
        wol->supported = pwol.supported;
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
        struct net_device *p = ds->ports[port].cpu_dp->master;
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        s8 cpu_port = ds->ports[port].cpu_dp->index;
-       struct ethtool_wolinfo pwol;
+       struct ethtool_wolinfo pwol = { };
 
-       p->ethtool_ops->get_wol(p, &pwol);
+       if (p->ethtool_ops->get_wol)
+               p->ethtool_ops->get_wol(p, &pwol);
        if (wol->wolopts & ~pwol.supported)
                return -EINVAL;
 
index 693a67f45bef1c74a3a412a915965372ff67b479..ddc1f9ca8ebcaa95751e4732e5d64f1dd83c32af 100644 (file)
@@ -1162,6 +1162,12 @@ static struct platform_driver gswip_driver = {
 
 module_platform_driver(gswip_driver);
 
+MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin");
+MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin");
 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
 MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver");
 MODULE_LICENSE("GPL v2");
index 12fd7ce3f1ffdb7cdd9875af49468ff729a82c7a..7e3c00bd9532a1051483f9e264f8c1538536870d 100644 (file)
@@ -896,7 +896,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
        default:
                return U64_MAX;
        }
-       value = (((u64)high) << 16) | low;
+       value = (((u64)high) << 32) | low;
        return value;
 }
 
@@ -3093,7 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
        .port_link_state = mv88e6352_port_link_state,
        .port_get_cmode = mv88e6185_port_get_cmode,
-       .stats_snapshot = mv88e6320_g1_stats_snapshot,
+       .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -4595,6 +4595,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
        return 0;
 }
 
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+       int i;
+
+       for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+               chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
 static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
                                                        int port)
 {
@@ -4631,6 +4639,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
        if (err)
                goto free;
 
+       mv88e6xxx_ports_cmode_init(chip);
+
        mutex_lock(&chip->reg_lock);
        err = mv88e6xxx_switch_reset(chip);
        mutex_unlock(&chip->reg_lock);
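
Two of the chip.c fixes are easy to misread in isolation: the statistics one corrects the shift used to glue two 32-bit counter halves together (a 16-bit shift would overlap the low word), and cmode is pre-seeded with an invalid value so the first real configuration is never skipped as a spurious no-op. The combine, spelled out:

    #include <stdint.h>

    /* high must land in bits 63..32; shifting by 16 would clobber the
     * upper half of low. */
    static uint64_t combine_stat(uint32_t high, uint32_t low)
    {
            return ((uint64_t)high << 32) | low;
    }
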
index ebd26b6a93e6b6084545e2649daa40c9621da47d..79ab51e69aee4df28059df4371694fc4b4ca401d 100644 (file)
@@ -398,6 +398,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                cmode = 0;
        }
 
+       /* cmode doesn't change, nothing to do for us */
+       if (cmode == chip->ports[port].cmode)
+               return 0;
+
        lane = mv88e6390x_serdes_get_lane(chip, port);
        if (lane < 0)
                return lane;
@@ -408,7 +412,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                        return err;
        }
 
-       err = mv88e6390_serdes_power(chip, port, false);
+       err = mv88e6390x_serdes_power(chip, port, false);
        if (err)
                return err;
 
@@ -424,7 +428,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
                if (err)
                        return err;
 
-               err = mv88e6390_serdes_power(chip, port, true);
+               err = mv88e6390x_serdes_power(chip, port, true);
                if (err)
                        return err;
 
index e583641de758e816c321e980c81266a5b5d696c3..4aadf321edb7e5703afbea8be817cd7126ddda5c 100644 (file)
@@ -52,6 +52,7 @@
 #define MV88E6185_PORT_STS_CMODE_1000BASE_X    0x0005
 #define MV88E6185_PORT_STS_CMODE_PHY           0x0006
 #define MV88E6185_PORT_STS_CMODE_DISABLED      0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID       0xff
 
 /* Offset 0x01: MAC (or PCS or Physical) Control Register */
 #define MV88E6XXX_PORT_MAC_CTL                         0x01
index b58ca7cb8e9d4eef80055cfdba3dab08db4127ad..fbba300c1d01c1005e2ebb7ae3ddadca25821884 100644 (file)
@@ -275,6 +275,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
 {
+       /* Tx TC/Queue number config */
+       hw_atl_rpb_tps_tx_tc_mode_set(self, 1U);
+
        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
index 939f77e2e1178b4c0cf01d97c5197b8128edbed2..8ac7a67b15c1f690276d37db3d29cc57c5ba3ce2 100644 (file)
@@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
                            HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en);
 }
 
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+                                  u32 tx_traf_class_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR,
+                       HW_ATL_TPB_TX_TC_MODE_MSK,
+                       HW_ATL_TPB_TX_TC_MODE_SHIFT,
+                       tx_traf_class_mode);
+}
+
 void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
                                                u32 tx_buff_hi_threshold_per_tc,
                                         u32 buffer)
index 03c570d115fe4b1991eee11d4af33555f75f9f48..f529540bfd7e71f176d91f76598d5caf1aa51dac 100644 (file)
@@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
 
 /* tpb */
 
+/* set TX Traffic Class Mode */
+void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw,
+                                  u32 tx_traf_class_mode);
+
 /* set tx buffer enable */
 void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
 
index 8470d92db81237a06fd2c6076eb347177018c782..e91ffce005f12fd8e085416fa1d977062ae99bcf 100644 (file)
 /* default value of bitfield tx_buf_en */
 #define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0
 
+/* register address for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900
+/* bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100
+/* inverted bitmask for bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF
+/* lower bit position of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8
+/* width of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1
+/* default value of bitfield tx_tc_mode */
+#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0
+
 /* tx tx{b}_hi_thresh[c:0] bitfield definitions
  * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
  * parameter: buffer {b} | stride size 0x10 | range [0, 7]
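
Each bitfield in these headers is described by an (ADDR, MSK, SHIFT) triple, and hw_atl_rpb_tps_tx_tc_mode_set() above performs one masked read-modify-write against it. The generic operation, as a small sketch:

    #include <stdint.h>

    /* Clear the field, then OR in the new value shifted into place and
     * clipped by the mask; e.g. msk 0x100 with shift 8 and val 1 sets
     * bit 8, matching the TX_TC_MODE field above. */
    static uint32_t set_field(uint32_t reg, uint32_t msk,
                              unsigned int shift, uint32_t val)
    {
            return (reg & ~msk) | ((val << shift) & msk);
    }
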
index bb41becb66099389216192c761541ad1fd51790d..31ff1e0d1baacc1fba3a95329b9de8159dfbadd6 100644 (file)
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct net_device *netdev;
        struct atl2_adapter *adapter;
-       static int cards_found;
+       static int cards_found = 0;
        unsigned long mmio_start;
        int mmio_len;
        int err;
 
-       cards_found = 0;
-
        err = pci_enable_device(pdev);
        if (err)
                return err;
index 28c9b0bdf2f6cff3fc5cfeed3d8ef0beed1dfb60..bc3ac369cbe35571a9f43c7589fff01148fd68a0 100644 (file)
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev,
 
        priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
+       /* Clear L2 header checks, which would prevent BPDUs
+        * from being received.
+        */
+       reg &= ~RXCHK_L2_HDR_DIS;
        if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
index 8bc7e495b027083942e6963c56bed62a25fd6f5d..803f7990d32b304f7516beaab2a3ed133fa6ca32 100644 (file)
@@ -500,6 +500,12 @@ normal_tx:
        }
 
        length >>= 9;
+       if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+               dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+                                    skb->len);
+               i = 0;
+               goto tx_dma_error;
+       }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 
@@ -3903,7 +3909,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                        if (len)
                                break;
                        /* on first few passes, just barely sleep */
-                       if (i < DFLT_HWRM_CMD_TIMEOUT)
+                       if (i < HWRM_SHORT_TIMEOUT_COUNTER)
                                usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                             HWRM_SHORT_MAX_TIMEOUT);
                        else
@@ -3926,7 +3932,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
                        dma_rmb();
                        if (*valid)
                                break;
-                       udelay(1);
+                       usleep_range(1, 5);
                }
 
                if (j >= HWRM_VALID_BIT_DELAY_USEC) {
index a451796deefe50890c198aba294134e8c6453b62..2fb653e0048da1370e71bc2ff03d3c1352cc8c74 100644 (file)
@@ -582,7 +582,7 @@ struct nqe_cn {
        (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +          \
         ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
 
-#define HWRM_VALID_BIT_DELAY_USEC      20
+#define HWRM_VALID_BIT_DELAY_USEC      150
 
 #define BNXT_HWRM_CHNL_CHIMP   0
 #define BNXT_HWRM_CHNL_KONG    1
index f4d81765221ea583b327f97879bc8ae3c705259a..62636c1ed14194977e1f5c548413088558045de2 100644 (file)
@@ -271,7 +271,7 @@ struct xcast_addr_list {
 };
 
 struct nicvf_work {
-       struct delayed_work    work;
+       struct work_struct     work;
        u8                     mode;
        struct xcast_addr_list *mc;
 };
@@ -327,7 +327,11 @@ struct nicvf {
        struct nicvf_work       rx_mode_work;
        /* spinlock to protect workqueue arguments from concurrent access */
        spinlock_t              rx_mode_wq_lock;
-
+       /* workqueue for handling kernel ndo_set_rx_mode() calls */
+       struct workqueue_struct *nicvf_rx_mode_wq;
+       /* mutex to protect VF's mailbox contents from concurrent access */
+       struct mutex            rx_mode_mtx;
+       struct delayed_work     link_change_work;
        /* PTP timestamp */
        struct cavium_ptp       *ptp_clock;
        /* Inbound timestamping is on */
@@ -575,10 +579,8 @@ struct set_ptp {
 
 struct xcast {
        u8    msg;
-       union {
-               u8    mode;
-               u64   mac;
-       } data;
+       u8    mode;
+       u64   mac:48;
 };
 
 /* 128 bit shared memory between PF and each VF */
index 6c8dcb65ff031d230303604c2071797027bf11a4..c90252829ed3402a86727f909eeeffe07c90c3fe 100644 (file)
@@ -57,14 +57,8 @@ struct nicpf {
 #define        NIC_GET_BGX_FROM_VF_LMAC_MAP(map)       ((map >> 4) & 0xF)
 #define        NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)      (map & 0xF)
        u8                      *vf_lmac_map;
-       struct delayed_work     dwork;
-       struct workqueue_struct *check_link;
-       u8                      *link;
-       u8                      *duplex;
-       u32                     *speed;
        u16                     cpi_base[MAX_NUM_VFS_SUPPORTED];
        u16                     rssi_base[MAX_NUM_VFS_SUPPORTED];
-       bool                    mbx_lock[MAX_NUM_VFS_SUPPORTED];
 
        /* MSI-X */
        u8                      num_vec;
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
        nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
 }
 
+/* Get BGX LMAC link status and update the corresponding VF if there is
+ * a change. This is valid only when no internal L2 switch is present;
+ * otherwise the VF link is always treated as up.
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+       union nic_mbx mbx = {};
+       struct bgx_link_status link;
+       u8 bgx, lmac;
+
+       mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+       /* Get BGX, LMAC indices for the VF */
+       bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+       /* Get interface link status */
+       bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+       /* Send a mbox message to VF with current link status */
+       mbx.link_status.link_up = link.link_up;
+       mbx.link_status.duplex = link.duplex;
+       mbx.link_status.speed = link.speed;
+       mbx.link_status.mac_type = link.mac_type;
+
+       /* reply with link status */
+       nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        int i;
        int ret = 0;
 
-       nic->mbx_lock[vf] = true;
-
        mbx_addr = nic_get_mbx_addr(vf);
        mbx_data = (u64 *)&mbx;
 
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        switch (mbx.msg.msg) {
        case NIC_MBOX_MSG_READY:
                nic_mbx_send_ready(nic, vf);
-               if (vf < nic->num_vf_en) {
-                       nic->link[vf] = 0;
-                       nic->duplex[vf] = 0;
-                       nic->speed[vf] = 0;
-               }
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_QS_CFG:
                reg_addr = NIC_PF_QSET_0_127_CFG |
                           (mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                break;
        case NIC_MBOX_MSG_RSS_SIZE:
                nic_send_rss_size(nic, vf);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_RSS_CFG:
        case NIC_MBOX_MSG_RSS_CFG_CONT:
                nic_config_rss(nic, &mbx.rss_cfg);
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
        case NIC_MBOX_MSG_CFG_DONE:
                /* Last message of VF config msg sequence */
                nic_enable_vf(nic, vf, true);
-               goto unlock;
+               break;
        case NIC_MBOX_MSG_SHUTDOWN:
                /* First msg in VF teardown sequence */
                if (vf >= nic->num_vf_en)
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                break;
        case NIC_MBOX_MSG_ALLOC_SQS:
                nic_alloc_sqs(nic, &mbx.sqs_alloc);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_NICVF_PTR:
                nic->nicvf[vf] = mbx.nicvf.nicvf;
                break;
        case NIC_MBOX_MSG_PNICVF_PTR:
                nic_send_pnicvf(nic, vf);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_SNICVF_PTR:
                nic_send_snicvf(nic, &mbx.nicvf);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_BGX_STATS:
                nic_get_bgx_stats(nic, &mbx.bgx_stats);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_LOOPBACK:
                ret = nic_config_loopback(nic, &mbx.lbk);
                break;
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                break;
        case NIC_MBOX_MSG_PFC:
                nic_pause_frame(nic, vf, &mbx.pfc);
-               goto unlock;
+               return;
        case NIC_MBOX_MSG_PTP_CFG:
                nic_config_timestamp(nic, vf, &mbx.ptp);
                break;
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
-                                       mbx.xcast.data.mac,
+                                       mbx.xcast.mac,
                                        vf < NIC_VF_PER_MBX_REG ? vf :
                                        vf - NIC_VF_PER_MBX_REG);
                break;
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                }
                bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
                lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-               bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode);
+               bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
                break;
+       case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+               if (vf >= nic->num_vf_en) {
+                       ret = -1; /* NACK */
+                       break;
+               }
+               nic_link_status_get(nic, vf);
+               return;
        default:
                dev_err(&nic->pdev->dev,
                        "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
                        mbx.msg.msg, vf);
                nic_mbx_send_nack(nic, vf);
        }
-unlock:
-       nic->mbx_lock[vf] = false;
 }
 
 static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
        return 0;
 }
 
-/* Poll for BGX LMAC link status and update corresponding VF
- * if there is a change, valid only if internal L2 switch
- * is not present otherwise VF link is always treated as up
- */
-static void nic_poll_for_link(struct work_struct *work)
-{
-       union nic_mbx mbx = {};
-       struct nicpf *nic;
-       struct bgx_link_status link;
-       u8 vf, bgx, lmac;
-
-       nic = container_of(work, struct nicpf, dwork.work);
-
-       mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
-
-       for (vf = 0; vf < nic->num_vf_en; vf++) {
-               /* Poll only if VF is UP */
-               if (!nic->vf_enabled[vf])
-                       continue;
-
-               /* Get BGX, LMAC indices for the VF */
-               bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-               lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-               /* Get interface link status */
-               bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
-
-               /* Inform VF only if link status changed */
-               if (nic->link[vf] == link.link_up)
-                       continue;
-
-               if (!nic->mbx_lock[vf]) {
-                       nic->link[vf] = link.link_up;
-                       nic->duplex[vf] = link.duplex;
-                       nic->speed[vf] = link.speed;
-
-                       /* Send a mbox message to VF with current link status */
-                       mbx.link_status.link_up = link.link_up;
-                       mbx.link_status.duplex = link.duplex;
-                       mbx.link_status.speed = link.speed;
-                       mbx.link_status.mac_type = link.mac_type;
-                       nic_send_msg_to_vf(nic, vf, &mbx);
-               }
-       }
-       queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
-}
-
 static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct device *dev = &pdev->dev;
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!nic->vf_lmac_map)
                goto err_release_regions;
 
-       nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-       if (!nic->link)
-               goto err_release_regions;
-
-       nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL);
-       if (!nic->duplex)
-               goto err_release_regions;
-
-       nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL);
-       if (!nic->speed)
-               goto err_release_regions;
-
        /* Initialize hardware */
        nic_init_hw(nic);
 
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_unregister_interrupts;
 
-       /* Register a physical link status poll fn() */
-       nic->check_link = alloc_workqueue("check_link_status",
-                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
-       if (!nic->check_link) {
-               err = -ENOMEM;
-               goto err_disable_sriov;
-       }
-
-       INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
-       queue_delayed_work(nic->check_link, &nic->dwork, 0);
-
        return 0;
 
-err_disable_sriov:
-       if (nic->flags & NIC_SRIOV_ENABLED)
-               pci_disable_sriov(pdev);
 err_unregister_interrupts:
        nic_unregister_interrupts(nic);
 err_release_regions:
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev)
        if (nic->flags & NIC_SRIOV_ENABLED)
                pci_disable_sriov(pdev);
 
-       if (nic->check_link) {
-               /* Destroy work Queue */
-               cancel_delayed_work_sync(&nic->dwork);
-               destroy_workqueue(nic->check_link);
-       }
-
        nic_unregister_interrupts(nic);
        pci_release_regions(pdev);
 
index 88f8a8fa93cdcef2162f1867b46ed9525ef4fbf1..503cfadff4ace4c0c7858bba02a0322656550e82 100644 (file)
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444);
 MODULE_PARM_DESC(cpi_alg,
                 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-/* workqueue for handling kernel ndo_set_rx_mode() calls */
-static struct workqueue_struct *nicvf_rx_mode_wq;
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
        if (nic->sqs_mode)
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 {
        int timeout = NIC_MBOX_MSG_TIMEOUT;
        int sleep = 10;
+       int ret = 0;
+
+       mutex_lock(&nic->rx_mode_mtx);
 
        nic->pf_acked = false;
        nic->pf_nacked = false;
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
                        netdev_err(nic->netdev,
                                   "PF NACK to mbox msg 0x%02x from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       break;
                }
                msleep(sleep);
                if (nic->pf_acked)
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
                        netdev_err(nic->netdev,
                                   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
                                   (mbx->msg.msg & 0xFF), nic->vf_id);
-                       return -EBUSY;
+                       ret = -EBUSY;
+                       break;
                }
        }
-       return 0;
+       mutex_unlock(&nic->rx_mode_mtx);
+       return ret;
 }
 
 /* Checks if VF is able to communicate with PF
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic)
        return 1;
 }
 
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+       if (nicvf_send_msg_to_pf(nic, &mbx)) {
+               netdev_err(nic->netdev,
+                          "PF didn't respond to CFG DONE msg\n");
+       }
+}
+
 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
 {
        if (bgx->rx)
@@ -228,21 +242,24 @@ static void  nicvf_handle_mbx_intr(struct nicvf *nic)
                break;
        case NIC_MBOX_MSG_BGX_LINK_CHANGE:
                nic->pf_acked = true;
-               nic->link_up = mbx.link_status.link_up;
-               nic->duplex = mbx.link_status.duplex;
-               nic->speed = mbx.link_status.speed;
-               nic->mac_type = mbx.link_status.mac_type;
-               if (nic->link_up) {
-                       netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
-                                   nic->speed,
-                                   nic->duplex == DUPLEX_FULL ?
-                                   "Full" : "Half");
-                       netif_carrier_on(nic->netdev);
-                       netif_tx_start_all_queues(nic->netdev);
-               } else {
-                       netdev_info(nic->netdev, "Link is Down\n");
-                       netif_carrier_off(nic->netdev);
-                       netif_tx_stop_all_queues(nic->netdev);
+               if (nic->link_up != mbx.link_status.link_up) {
+                       nic->link_up = mbx.link_status.link_up;
+                       nic->duplex = mbx.link_status.duplex;
+                       nic->speed = mbx.link_status.speed;
+                       nic->mac_type = mbx.link_status.mac_type;
+                       if (nic->link_up) {
+                               netdev_info(nic->netdev,
+                                           "Link is Up %d Mbps %s duplex\n",
+                                           nic->speed,
+                                           nic->duplex == DUPLEX_FULL ?
+                                           "Full" : "Half");
+                               netif_carrier_on(nic->netdev);
+                               netif_tx_start_all_queues(nic->netdev);
+                       } else {
+                               netdev_info(nic->netdev, "Link is Down\n");
+                               netif_carrier_off(nic->netdev);
+                               netif_tx_stop_all_queues(nic->netdev);
+                       }
                }
                break;
        case NIC_MBOX_MSG_ALLOC_SQS:
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
        struct nicvf_cq_poll *cq_poll = NULL;
        union nic_mbx mbx = {};
 
+       cancel_delayed_work_sync(&nic->link_change_work);
+
+       /* wait until all queued set_rx_mode tasks complete */
+       drain_workqueue(nic->nicvf_rx_mode_wq);
+
        mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
        nicvf_send_msg_to_pf(nic, &mbx);
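Two distinct waits happen here before the SHUTDOWN message goes out: cancel_delayed_work_sync() stops the self-rearming link poller, and drain_workqueue() waits until everything already queued on the per-VF workqueue has run, including items that requeue themselves (which plain flush_workqueue() would not cover). A rough sketch of that teardown ordering, with illustrative names:

#include <linux/workqueue.h>

struct nic_priv {
        struct workqueue_struct *wq;    /* per-device ordered queue */
        struct delayed_work link_poll;  /* re-arms itself while up */
};

static void nic_quiesce(struct nic_priv *priv)
{
        /* 1. stop the poller; also waits for a running instance */
        cancel_delayed_work_sync(&priv->link_poll);

        /* 2. let queued rx-mode work finish; drain also covers
         *    items that requeue themselves while draining
         */
        drain_workqueue(priv->wq);

        /* 3. only now is it safe to tell the peer we are going down */
}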
 
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
        return nicvf_send_msg_to_pf(nic, &mbx);
 }
 
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+       struct nicvf *nic = container_of(work_arg,
+                                        struct nicvf,
+                                        link_change_work.work);
+       union nic_mbx mbx = {};
+       mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+       nicvf_send_msg_to_pf(nic, &mbx);
+       queue_delayed_work(nic->nicvf_rx_mode_wq,
+                          &nic->link_change_work, 2 * HZ);
+}
+
 int nicvf_open(struct net_device *netdev)
 {
        int cpu, err, qidx;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct nicvf_cq_poll *cq_poll = NULL;
-       union nic_mbx mbx = {};
+
+       /* wait until all queued set_rx_mode tasks complete, if any */
+       drain_workqueue(nic->nicvf_rx_mode_wq);
 
        netif_carrier_off(netdev);
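nicvf_link_status_check_task() above re-arms itself every two seconds, so the single queue_delayed_work() issued from nicvf_open() becomes a poll loop that keeps asking the PF for BGX link state until nicvf_stop() cancels it. A self-contained sketch of that self-rearming pattern; the 2 * HZ period matches the hunk, everything else is illustrative.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
        /* ... query link state here ... */

        /* re-arm: runs again 2 seconds from now until cancelled */
        queue_delayed_work(poll_wq, &poll_work, 2 * HZ);
}

static int poll_start(void)
{
        poll_wq = alloc_ordered_workqueue("link_poll", WQ_MEM_RECLAIM);
        if (!poll_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        queue_delayed_work(poll_wq, &poll_work, 0);     /* first run now */
        return 0;
}

static void poll_stop(void)
{
        /* cancel_delayed_work_sync() also waits out a running instance */
        cancel_delayed_work_sync(&poll_work);
        destroy_workqueue(poll_wq);
}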
 
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
        /* Send VF config done msg to PF */
-       mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
-       nicvf_write_to_mbx(nic, &mbx);
+       nicvf_send_cfg_done(nic);
+
+       INIT_DELAYED_WORK(&nic->link_change_work,
+                         nicvf_link_status_check_task);
+       queue_delayed_work(nic->nicvf_rx_mode_wq,
+                          &nic->link_change_work, 0);
 
        return 0;
 cleanup:
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
 
        /* flush DMAC filters and reset RX mode */
        mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
-       nicvf_send_msg_to_pf(nic, &mbx);
+       if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+               goto free_mc;
 
        if (mode & BGX_XCAST_MCAST_FILTER) {
                /* once filtering is enabled, we need to signal the PF to add
                 * its own LMAC to the filter to accept packets for it.
                 */
                mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-               mbx.xcast.data.mac = 0;
-               nicvf_send_msg_to_pf(nic, &mbx);
+               mbx.xcast.mac = 0;
+               if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+                       goto free_mc;
        }
 
        /* check if we have any specific MACs to be added to PF DMAC filter */
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
                /* now go through kernel list of MACs and add them one by one */
                for (idx = 0; idx < mc_addrs->count; idx++) {
                        mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-                       mbx.xcast.data.mac = mc_addrs->mc[idx];
-                       nicvf_send_msg_to_pf(nic, &mbx);
+                       mbx.xcast.mac = mc_addrs->mc[idx];
+                       if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+                               goto free_mc;
                }
-               kfree(mc_addrs);
        }
 
        /* and finally set rx mode for PF accordingly */
        mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
-       mbx.xcast.data.mode = mode;
+       mbx.xcast.mode = mode;
 
        nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+       kfree(mc_addrs);
 }
 
 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
 {
        struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
-                                                 work.work);
+                                                 work);
        struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
        u8 mode;
        struct xcast_addr_list *mc;
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
        kfree(nic->rx_mode_work.mc);
        nic->rx_mode_work.mc = mc_list;
        nic->rx_mode_work.mode = mode;
-       queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+       queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
        spin_unlock(&nic->rx_mode_wq_lock);
 }
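.ndo_set_rx_mode is called with the netdev address-list lock held and must not sleep, which is why the driver snapshots the multicast list under its spinlock and pushes the sleeping mailbox traffic onto the per-VF ordered workqueue; with the delay gone, plain queue_work() replaces queue_delayed_work(). A hedged sketch of the snapshot-and-defer idea, with illustrative field names:

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/types.h>

struct rx_mode_state {
        spinlock_t lock;                /* protects the pending snapshot */
        struct work_struct work;
        struct workqueue_struct *wq;
        u8 *pending_mc;                 /* latest multicast snapshot */
};

/* atomic context: just publish the new snapshot and kick the worker */
static void sketch_set_rx_mode(struct rx_mode_state *st, u8 *mc_snapshot)
{
        spin_lock(&st->lock);
        kfree(st->pending_mc);          /* superseded, never sent */
        st->pending_mc = mc_snapshot;
        queue_work(st->wq, &st->work);  /* no-op if already queued */
        spin_unlock(&st->lock);
}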
 
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        INIT_WORK(&nic->reset_task, nicvf_reset_task);
 
-       INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+       nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+                                                       WQ_MEM_RECLAIM,
+                                                       nic->vf_id);
+       INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
        spin_lock_init(&nic->rx_mode_wq_lock);
+       mutex_init(&nic->rx_mode_mtx);
 
        err = register_netdev(netdev);
        if (err) {
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev)
        nic = netdev_priv(netdev);
        pnetdev = nic->pnicvf->netdev;
 
-       cancel_delayed_work_sync(&nic->rx_mode_work.work);
-
        /* Check if this Qset is assigned to different VF.
         * If yes, clean primary and all secondary Qsets.
         */
        if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
                unregister_netdev(pnetdev);
+       if (nic->nicvf_rx_mode_wq) {
+               destroy_workqueue(nic->nicvf_rx_mode_wq);
+               nic->nicvf_rx_mode_wq = NULL;
+       }
        nicvf_unregister_interrupts(nic);
        pci_set_drvdata(pdev, NULL);
        if (nic->drv_stats)
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = {
 static int __init nicvf_init_module(void)
 {
        pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
-       nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic",
-                                                  WQ_MEM_RECLAIM);
        return pci_register_driver(&nicvf_driver);
 }
 
 static void __exit nicvf_cleanup_module(void)
 {
-       if (nicvf_rx_mode_wq) {
-               destroy_workqueue(nicvf_rx_mode_wq);
-               nicvf_rx_mode_wq = NULL;
-       }
        pci_unregister_driver(&nicvf_driver);
 }
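Taken together, these hunks move the workqueue from a module global ("nicvf_generic", created at module init) to one ordered queue per VF, created in probe and destroyed in remove; note that alloc_ordered_workqueue() accepts printf-style arguments, which is how the VF id ends up in the queue name. A sketch of that probe/remove pairing, with assumed fields rather than the driver's struct:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct vf_dev {
        int id;
        struct workqueue_struct *wq;
};

static int vf_probe(struct vf_dev *vf)
{
        /* one ordered (max_active == 1) queue per device; the fmt
         * arguments give each queue a distinct name
         */
        vf->wq = alloc_ordered_workqueue("vf_wq_%d", WQ_MEM_RECLAIM, vf->id);
        if (!vf->wq)
                return -ENOMEM;
        return 0;
}

static void vf_remove(struct vf_dev *vf)
{
        if (vf->wq) {
                destroy_workqueue(vf->wq);      /* flushes, then frees */
                vf->wq = NULL;
        }
}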
 
index e337da6ba2a4c16973f4728c9fd9564da162942c..673c57b8023fe3e2113cc1121f15d6700d12faa1 100644 (file)
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx)
 
        /* Disable MAC steering (NCSI traffic) */
        for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
-               bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+               bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
 }
 
 static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
index cbdd20b9ee6f1d4ede1551c37d585f3ecef0ab1b..5cbc54e9eb19c4e285fdcc0a8d38953ee59c2df3 100644 (file)
@@ -60,7 +60,7 @@
 #define  RX_DMACX_CAM_EN                       BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)                        (((u64)x) << 49)
 #define  RX_DMAC_COUNT                         32
-#define BGX_CMR_RX_STREERING           0x300
+#define BGX_CMR_RX_STEERING            0x300
 #define  RX_TRAFFIC_STEER_RULE_COUNT           8
 #define BGX_CMR_CHAN_MSK_AND           0x450
 #define BGX_CMR_BIST_STATUS            0x460
index c041f44324db1d0e3cb16acd4b06925f6c481dcf..b3654598a2d5179ac1ae168772dd6718d7d9d9fa 100644 (file)
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
        lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lld->udb_density = 1 << adap->params.sge.eq_qpp;
        lld->ucq_density = 1 << adap->params.sge.iq_qpp;
+       lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
        lld->filt_mode = adap->params.tp.vlan_pri_map;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
index 5fa9a2d5fc4bafa11fe529ec3249ebea3fbcc0b9..21da34a4ca242ed44fa778f5773fb69fd2623fe8 100644 (file)
@@ -336,6 +336,7 @@ struct cxgb4_lld_info {
        unsigned int cclk_ps;                /* Core clock period in psec */
        unsigned short udb_density;          /* # of user DB/page */
        unsigned short ucq_density;          /* # of user CQs/page */
+       unsigned int sge_host_page_size;     /* SGE host page size */
        unsigned short filt_mode;            /* filter optional components */
        unsigned short tx_modq[NCHAN];       /* maps each tx channel to a */
                                             /* scheduler queue */
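As decoded in the uld_init() hunk, the SGE hps parameter stores log2(host page size) minus 10, so the ULD-visible size is 1 << (hps + 10); for instance hps = 2 yields 4 KiB pages. A trivial userspace check of that arithmetic (the value is illustrative):

#include <assert.h>

int main(void)
{
        unsigned int hps = 2;                   /* example encoding */
        unsigned int bytes = 1u << (hps + 10);  /* 2^(hps + 10) */

        assert(bytes == 4096);                  /* hps = 2 -> 4 KiB */
        return 0;
}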
index b8155f5e71b41aee8c4e3b7e8cab151aac766ffb..ac55db065f167ad58f9ec41966afa7b0299b5f40 100644 (file)
@@ -3128,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
                dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
                dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
        }
+
+       put_device(&pdev->dev);
+
        return 0;
 }
 EXPORT_SYMBOL(hns_dsaf_roce_reset);
index f52e2c46e6a7b2beeafcb4b0c5c035afece68fcf..e4ff531db14a977dfe70b59c7ee0cc0465868d69 100644 (file)
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
             i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
             !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
        if (!ok) {
+               /* Log this in case the user has forgotten to give the kernel
+                * any buffers, even later on in the application's lifetime.
+                */
                dev_info(&vsi->back->pdev->dev,
-                        "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+                        "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
                         ring->xsk_umem ? "UMEM enabled " : "",
                         ring->queue_index, pf_q);
        }
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi)
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                i40e_clean_tx_ring(vsi->tx_rings[i]);
-               if (i40e_enabled_xdp_vsi(vsi))
+               if (i40e_enabled_xdp_vsi(vsi)) {
+                       /* Make sure that in-progress ndo_xdp_xmit
+                        * calls are completed.
+                        */
+                       synchronize_rcu();
                        i40e_clean_tx_ring(vsi->xdp_rings[i]);
+               }
                i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
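ndo_xdp_xmit() runs under rcu_read_lock() on the redirect path, so a single synchronize_rcu() after marking the VSI down guarantees that no transmit is still touching the XDP TX ring when it is cleaned. A minimal reader/writer sketch of that ordering; the ring type and helpers are illustrative:

#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_ring {
        bool down;
        /* descriptors ... */
};

void my_ring_clean(struct my_ring *ring);       /* assumed helpers */
int my_ring_enqueue(struct my_ring *ring);

/* writer: quiesce readers before tearing the ring down */
static void ring_teardown(struct my_ring *ring)
{
        WRITE_ONCE(ring->down, true);
        synchronize_rcu();      /* waits for all current RCU readers */
        my_ring_clean(ring);    /* no xmit can still be in flight */
}

/* reader: roughly what an ndo_xdp_xmit() caller looks like */
static int ring_xmit(struct my_ring *ring)
{
        int ret = -ENXIO;

        rcu_read_lock();
        if (!READ_ONCE(ring->down))
                ret = my_ring_enqueue(ring);
        rcu_read_unlock();
        return ret;
}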
 
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
        if (old_prog)
                bpf_prog_put(old_prog);
 
+       /* Kick start the NAPI context if there is an AF_XDP socket open
+        * on that queue id. This is so that receiving will start.
+        */
+       if (need_reset && prog)
+               for (i = 0; i < vsi->num_queue_pairs; i++)
+                       if (vsi->xdp_rings[i]->xsk_umem)
+                               (void)i40e_xsk_async_xmit(vsi->netdev, i);
+
        return 0;
 }
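Installing an XDP program resets the rings, and an AF_XDP RX queue then sits idle until its NAPI context is scheduled again; the loop above reuses the driver's ndo_xsk_async_xmit hook as that kick and ignores the return value, since this is best effort. A hedged sketch of the same loop with illustrative types:

#include <linux/netdevice.h>

struct my_ring {
        void *xsk_umem; /* non-NULL when an AF_XDP socket is bound */
};

int my_xsk_async_xmit(struct net_device *dev, u32 qid); /* assumed hook */

/* after a reset, poke every queue with a UMEM so RX restarts */
static void kick_af_xdp_queues(struct net_device *dev,
                               struct my_ring **rings, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                if (rings[i]->xsk_umem)
                        (void)my_xsk_async_xmit(dev, i);
}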
 
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
 static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
 {
        i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
-       if (i40e_enabled_xdp_vsi(vsi))
+       if (i40e_enabled_xdp_vsi(vsi)) {
+               /* Make sure that in-progress ndo_xdp_xmit calls are
+                * completed.
+                */
+               synchronize_rcu();
                i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
+       }
        i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
 }
 
index a7e14e98889f142f5e363593f8ac5266f9063b56..6c97667d20eff136cde56c0447c54892c464fe31 100644 (file)
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        struct i40e_netdev_priv *np = netdev_priv(dev);
        unsigned int queue_index = smp_processor_id();
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
        struct i40e_ring *xdp_ring;
        int drops = 0;
        int i;
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return -ENETDOWN;
 
-       if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
+       if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
+           test_bit(__I40E_CONFIG_BUSY, pf->state))
                return -ENXIO;
 
        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
index 870cf654e4364480e41887ec0e0d054a05f4e25a..3827f16e692357bf2ad71156bb97682e778d5ed2 100644 (file)
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
+
+               /* Kick start the NAPI context so that receiving will start */
+               err = i40e_xsk_async_xmit(vsi->netdev, qid);
+               if (err)
+                       return err;
        }
 
        return 0;
index daff8183534b96b1aa3dbaaf482d5d7c5101c1f9..cb35d8202572442ca28526761fc41c0dc3a5b72f 100644 (file)
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
                        else
                                mrqc = IXGBE_MRQC_VMDQRSS64EN;
 
-                       /* Enable L3/L4 for Tx Switched packets */
-                       mrqc |= IXGBE_MRQC_L3L4TXSWEN;
+                       /* Enable L3/L4 for Tx Switched packets only for X550;
+                        * older devices do not support this feature.
+                        */
+                       if (hw->mac.type >= ixgbe_mac_X550)
+                               mrqc |= IXGBE_MRQC_L3L4TXSWEN;
                } else {
                        if (tcs > 4)
                                mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct bpf_prog *old_prog;
+       bool need_reset;
 
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                return -EINVAL;
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
                return -ENOMEM;
 
        old_prog = xchg(&adapter->xdp_prog, prog);
+       need_reset = (!!prog != !!old_prog);
 
        /* If transitioning XDP modes reconfigure rings */
-       if (!!prog != !!old_prog) {
+       if (need_reset) {
                int err = ixgbe_setup_tc(dev, adapter->hw_tcs);
 
                if (err) {
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
        if (old_prog)
                bpf_prog_put(old_prog);
 
+       /* Kick start the NAPI context if there is an AF_XDP socket open
+        * on that queue id. This is so that receiving will start.
+        */
+       if (need_reset && prog)
+               for (i = 0; i < adapter->num_rx_queues; i++)
+                       if (adapter->xdp_ring[i]->xsk_umem)
+                               (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+
        return 0;
 }
 
index 65c3e2c979d4d89775d0d3fe9afad63a3046d075..36a8879536a4a552405de80ad6f6c3a319ebbd6e 100644 (file)
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
                ixgbe_txrx_ring_disable(adapter, qid);
 
        err = ixgbe_add_xsk_umem(adapter, umem, qid);
+       if (err)
+               return err;
 
-       if (if_running)
+       if (if_running) {
                ixgbe_txrx_ring_enable(adapter, qid);
 
-       return err;
+               /* Kick start the NAPI context so that receiving will start */
+               err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+               if (err)
+                       return err;
+       }
+
+       return 0;
 }
 
 static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
        dma_addr_t dma;
 
        while (budget-- > 0) {
-               if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+               if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
+                   !netif_carrier_ok(xdp_ring->netdev)) {
                        work_done = false;
                        break;
                }
index 2f427271a793e4749f4a5f1d1163e738545ed173..292a668ce88e6f75580910eb1997e3b791a27d72 100644 (file)
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 
        ret = mv643xx_eth_shared_of_probe(pdev);
        if (ret)
-               return ret;
+               goto err_put_clk;
        pd = dev_get_platdata(&pdev->dev);
 
        msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
        infer_hw_params(msp);
 
        return 0;
+
+err_put_clk:
+       if (!IS_ERR(msp->clk))
+               clk_disable_unprepare(msp->clk);
+       return ret;
 }
 
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
index 9d4568eb2297f1b31a63d0fece85e78906a9ef4e..8433fb9c3eeeb0a723948d111fcda337383223f1 100644 (file)
@@ -2146,7 +2146,7 @@ err_drop_frame:
                        if (unlikely(!skb))
                                goto err_drop_frame_ret_pool;
 
-                       dma_sync_single_range_for_cpu(dev->dev.parent,
+                       dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
                                                      rx_desc->buf_phys_addr,
                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
                                                      rx_bytes,
index f3a5fa84860f907748e702fb11521ff625d0f340..57727fe1501ee851ec5ca3e4dfe45d54ee5934e9 100644 (file)
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        INIT_WORK(&hw->restart_work, sky2_restart);
 
        pci_set_drvdata(pdev, hw);
-       pdev->d3_delay = 200;
+       pdev->d3_delay = 300;
 
        return 0;
 
index 6b88881b8e3585422f2548df3267175bc9d6b16f..c1438ae52a11922a79b281937f5d6bb5a7b4ca3b 100644 (file)
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->addr_len = ETH_ALEN;
        mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
        if (!is_valid_ether_addr(dev->dev_addr)) {
-               en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+               en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
                       priv->port, dev->dev_addr);
                err = -EINVAL;
                goto out;
index 32519c93df174c99d1a768209a4cb92ecdde5df3..b65e274b02e9920a603e3bf970155dad44c51d7f 100644 (file)
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                bool configure = false;
                bool pfc = false;
+               u16 thres_cells;
+               u16 delay_cells;
                bool lossy;
-               u16 thres;
 
                for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
                        if (prio_tc[j] == i) {
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
                        continue;
 
                lossy = !(pfc || pause_en);
-               thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-               delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
-                                                 pause_en);
-               mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+               thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+               delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+                                                       pfc, pause_en);
+               mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+                                    thres_cells, lossy);
        }
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
index f6ecfa778660f81293420e2301ebbd3f320b6891..8f72587b5a2cbb9c79b8d0377d779650eccb7446 100644 (file)
@@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
 MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
 MODULE_LICENSE("GPL");
 module_param_named(debug, debug.msg_enable, int, 0);
-MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)");
+MODULE_PARM_DESC(debug, "Debug verbosity level, given as the number of bits set (0=none, ..., 31=all)");
 MODULE_ALIAS("spi:" DRV_NAME);
index 310807ef328bd5b59f72f9d202227fbe21b3f413..4d1b4a24907fde3c43bf7fa9774b7632f550c928 100644 (file)
@@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
 }
 
 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
-                                    unsigned int frame_length)
+                                    unsigned int frame_length,
+                                    int nr_frags)
 {
        /* called only from within lan743x_tx_xmit_frame.
         * assuming tx->ring_lock has already been acquired.
@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
 
        /* wrap up previous descriptor */
        tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+       if (nr_frags <= 0) {
+               tx->frame_data0 |= TX_DESC_DATA0_LS_;
+               tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+       }
        tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
        tx_descriptor->data0 = tx->frame_data0;
 
@@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
        u32 tx_tail_flags = 0;
 
        /* wrap up previous descriptor */
-       tx->frame_data0 |= TX_DESC_DATA0_LS_;
-       tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+       if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+           TX_DESC_DATA0_DTYPE_DATA_) {
+               tx->frame_data0 |= TX_DESC_DATA0_LS_;
+               tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+       }
 
        tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
        buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
        }
 
        if (gso)
-               lan743x_tx_frame_add_lso(tx, frame_length);
+               lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
 
        if (nr_frags <= 0)
                goto finish;
index e23ca90289f71ca5776bab7e66659640de1857c2..0a868c829b907dda6c86ae57520eada246690dd9 100644 (file)
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 static int
 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-             enum alu_op alu_op, bool skip)
+             enum alu_op alu_op)
 {
        const struct bpf_insn *insn = &meta->insn;
 
-       if (skip) {
-               meta->skip = true;
-               return 0;
-       }
-
        wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
        wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
 
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
+       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
 }
 
 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
 }
 
 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
 }
 
 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
 }
 
 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+       return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
 }
 
 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
index beb8e5d6401a99e85667c2b12ad17e4a36968b8c..ded556b7bab5e51d6cbaca834296fa0e66d72881 100644 (file)
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
        eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
 
+       if (!ether_addr_equal(ethh->h_dest,
+                             p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_RDMA,
+                          "Got unexpected mac %pM instead of %pM\n",
+                          ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+               return -EINVAL;
+       }
+
        ether_addr_copy(remote_mac_addr, ethh->h_source);
        ether_addr_copy(local_mac_addr, ethh->h_dest);
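Rejecting SYN packets whose destination MAC does not match the iWARP interface closes a window where a listener could be matched against traffic that belongs to another function. ether_addr_equal() is the standard helper for such comparisons; a tiny usage sketch under that assumption:

#include <linux/etherdevice.h>
#include <linux/errno.h>

/* drop frames not addressed to us; dmac/our_mac are 6-byte arrays */
static int check_dest_mac(const u8 *dmac, const u8 *our_mac)
{
        if (!ether_addr_equal(dmac, our_mac))
                return -EINVAL; /* not for this function, ignore */
        return 0;
}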
 
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
        struct qed_iwarp_info *iwarp_info;
        struct qed_ll2_acquire_data data;
        struct qed_ll2_cbs cbs;
-       u32 mpa_buff_size;
+       u32 buff_size;
        u16 n_ooo_bufs;
        int rc = 0;
        int i;
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
        memset(&data, 0, sizeof(data));
        data.input.conn_type = QED_LL2_TYPE_IWARP;
-       data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
+       data.input.mtu = params->max_mtu;
        data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
        data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
        data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
                goto err;
        }
 
+       buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
        rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
                                         QED_IWARP_LL2_SYN_RX_SIZE,
-                                        QED_IWARP_MAX_SYN_PKT_SIZE,
+                                        buff_size,
                                         iwarp_info->ll2_syn_handle);
        if (rc)
                goto err;
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
        if (rc)
                goto err;
 
-       mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
        rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
                                         data.input.rx_num_desc,
-                                        mpa_buff_size,
+                                        buff_size,
                                         iwarp_info->ll2_mpa_handle);
        if (rc)
                goto err;
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
 
        iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
 
-       iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+       iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
        if (!iwarp_info->mpa_intermediate_buf)
                goto err;
 
index b8f612d002419ea751c098e722e5574b855b8f9a..7ac959038324ef6f74a131f7844d7db1117d4892 100644 (file)
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
 
 #define QED_IWARP_LL2_SYN_TX_SIZE       (128)
 #define QED_IWARP_LL2_SYN_RX_SIZE       (256)
-#define QED_IWARP_MAX_SYN_PKT_SIZE      (128)
 
 #define QED_IWARP_LL2_OOO_DEF_TX_SIZE   (256)
 #define QED_IWARP_MAX_OOO              (16)
index 20299f6f65fce13d7deccacf5d27ba50cc858b46..736e29635b7729d0d53f85d1634c5a4c45fdfa18 100644 (file)
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
 static int dwmac4_rx_check_timestamp(void *desc)
 {
        struct dma_desc *p = (struct dma_desc *)desc;
+       unsigned int rdes0 = le32_to_cpu(p->des0);
+       unsigned int rdes1 = le32_to_cpu(p->des1);
+       unsigned int rdes3 = le32_to_cpu(p->des3);
        u32 own, ctxt;
        int ret = 1;
 
-       own = p->des3 & RDES3_OWN;
-       ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+       own = rdes3 & RDES3_OWN;
+       ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
                >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
 
        if (likely(!own && ctxt)) {
-               if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+               if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
                        /* Corrupted value */
                        ret = -EINVAL;
                else
index 5d85742a2be0b62189ce83dbc0de40714f6eb34c..3c749c327cbd4a0305c1833a72c60d6d2e253917 100644 (file)
@@ -696,25 +696,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
                                     struct ethtool_eee *edata)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       int ret;
 
-       priv->eee_enabled = edata->eee_enabled;
-
-       if (!priv->eee_enabled)
+       if (!edata->eee_enabled) {
                stmmac_disable_eee_mode(priv);
-       else {
+       } else {
                /* We are asking to enable EEE, but it is safe to
                 * verify this by invoking the eee_init function,
                 * which returns an error in case of failure.
                 */
-               priv->eee_enabled = stmmac_eee_init(priv);
-               if (!priv->eee_enabled)
+               edata->eee_enabled = stmmac_eee_init(priv);
+               if (!edata->eee_enabled)
                        return -EOPNOTSUPP;
-
-               /* Do not change tx_lpi_timer in case of failure */
-               priv->tx_lpi_timer = edata->tx_lpi_timer;
        }
 
-       return phy_ethtool_set_eee(dev->phydev, edata);
+       ret = phy_ethtool_set_eee(dev->phydev, edata);
+       if (ret)
+               return ret;
+
+       priv->eee_enabled = edata->eee_enabled;
+       priv->tx_lpi_timer = edata->tx_lpi_timer;
+       return 0;
 }
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
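The reshuffled stmmac_ethtool_op_set_eee() only writes eee_enabled and tx_lpi_timer into priv once phy_ethtool_set_eee() has succeeded, so a PHY-side failure can no longer leave the MAC claiming EEE is on. The general commit-last shape, as a sketch with invented types and an assumed peer_apply-style helper:

#include <linux/errno.h>
#include <linux/types.h>

struct eee_req { bool enabled; u32 timer; };    /* illustrative */
struct eee_state { bool enabled; u32 timer; };

int phy_side_apply(const struct eee_req *req);  /* assumed, may fail */

static int set_eee(struct eee_state *st, const struct eee_req *req)
{
        int ret;

        ret = phy_side_apply(req);      /* st untouched so far */
        if (ret)
                return ret;

        st->enabled = req->enabled;     /* commit only on full success */
        st->timer = req->timer;
        return 0;
}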
index 1f612268c9987796aa35e18c990dfd45fc45628f..d847f672a705f47c6b4bdf579917385b2640da79 100644 (file)
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
                const char *name;
                char node_name[32];
 
-               if (of_property_read_string(node, "label", &name) < 0) {
+               if (of_property_read_string(child, "label", &name) < 0) {
                        snprintf(node_name, sizeof(node_name), "%pOFn", child);
                        name = node_name;
                }
index 3377ac66a34748bf03c5875410f808562794ed18..5583d993480d2c11ed17afa189385627dcbad17b 100644 (file)
@@ -692,15 +692,20 @@ out:
 static int geneve_open(struct net_device *dev)
 {
        struct geneve_dev *geneve = netdev_priv(dev);
-       bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
        bool metadata = geneve->collect_md;
+       bool ipv4, ipv6;
        int ret = 0;
 
+       ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+       ipv4 = !ipv6 || metadata;
 #if IS_ENABLED(CONFIG_IPV6)
-       if (ipv6 || metadata)
+       if (ipv6) {
                ret = geneve_sock_add(geneve, true);
+               if (ret < 0 && ret != -EAFNOSUPPORT)
+                       ipv4 = false;
+       }
 #endif
-       if (!ret && (!ipv6 || metadata))
+       if (ipv4)
                ret = geneve_sock_add(geneve, false);
        if (ret < 0)
                geneve_sock_release(geneve);
index 256adbd044f5ea138154c0a9126aacdaf1aafcb4..cf4897043e833618b5f5d6d452914ddbeff0e00d 100644 (file)
@@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net,
        schedule_delayed_work(&ndev_ctx->dwork, 0);
 }
 
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+       struct iphdr *iph = (struct iphdr *)skb->data;
+
+       iph->check = 0;
+       iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                             struct netvsc_channel *nvchan)
 {
@@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
        /* skb is already created with CHECKSUM_NONE */
        skb_checksum_none_assert(skb);
 
-       /*
-        * In Linux, the IP checksum is always checked.
-        * Do L4 checksum offload if enabled and present.
+       /* Incoming packets may have IP header checksum verified by the host.
+        * They may not have IP header checksum computed after coalescing.
+        * We compute it here if the flags are set, because on Linux, the IP
+        * checksum is always checked.
+        */
+       if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+           csum_info->receive.ip_checksum_succeeded &&
+           skb->protocol == htons(ETH_P_IP))
+               netvsc_comp_ipcsum(skb);
+
+       /* Do L4 checksum offload if enabled and present.
         */
        if (csum_info && (net->features & NETIF_F_RXCSUM)) {
                if (csum_info->receive.tcp_checksum_succeeded ||
index 7cdac77d0c68527630b89c2eaff874582200d93a..07e41c42bcf5e923285e1e3435247b47fa90d071 100644 (file)
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev,
 
        if (!data)
                return 0;
+       if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
 
        if (data[IFLA_IPVLAN_MODE]) {
                u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
                struct ipvl_dev *tmp = netdev_priv(phy_dev);
 
                phy_dev = tmp->phy_dev;
+               if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+                       return -EPERM;
        } else if (!netif_is_ipvlan_port(phy_dev)) {
                /* Exit early if the underlying link is invalid or busy */
                if (phy_dev->type != ARPHRD_ETHER ||
index da6a67d47ce99e21d7034c004b7b5306778cbe79..56fa3606cb9cb2db6e43e13a40f3401d5396823a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/phy.h>
+#include <linux/delay.h>
 
 #include <dt-bindings/net/ti-dp83867.h>
 
@@ -325,6 +326,8 @@ static int dp83867_phy_reset(struct phy_device *phydev)
        if (err < 0)
                return err;
 
+       usleep_range(10, 20);
+
        return dp83867_config_init(phydev);
 }
 
index 82ab6ed3b74ee5b6f3229d62fa276901c53df454..6bac602094bd3955b66a9b6a15f54a4261abe997 100644 (file)
@@ -26,6 +26,8 @@
 #include <linux/marvell_phy.h>
 #include <linux/phy.h>
 
+#define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK        0x01e0
+
 enum {
        MV_PCS_BASE_T           = 0x0000,
        MV_PCS_BASE_R           = 0x1000,
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev)
        else
                reg = 0;
 
+       /* Make sure we clear unsupported 2.5G/5G advertising */
        ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
-                           MDIO_AN_10GBT_CTRL_ADV10G, reg);
+                           MDIO_AN_10GBT_CTRL_ADV10G |
+                           MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg);
        if (ret < 0)
                return ret;
        if (ret > 0)
index 66b9cfe692fc707be1acb6a3f87dbfc996566c6a..7368616286ae9ced5b44e782e69877604025a7b6 100644 (file)
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
        err = device_register(&bus->dev);
        if (err) {
                pr_err("mii_bus %s failed to register\n", bus->id);
-               put_device(&bus->dev);
                return -EINVAL;
        }
 
index b1f959935f50ceba9d23850e01d56f4f16d28e63..b7df0295a3ca380f74c831f3f8f1d9ed9d93b100 100644 (file)
@@ -344,6 +344,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+       if (ret)
+               return ret;
+
+       return kszphy_config_init(phydev);
+}
+
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
                                       const struct device_node *of_node,
                                       u16 reg,
@@ -1040,7 +1051,7 @@ static struct phy_driver ksphy_driver[] = {
        .name           = "Micrel KSZ8061",
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .features       = PHY_BASIC_FEATURES,
-       .config_init    = kszphy_config_init,
+       .config_init    = ksz8061_config_init,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
        .suspend        = genphy_suspend,
index 938803237d7fa54ed70e7118bfe4d5b2dfbf0c47..85987aac31c48f4349fe8bd977cbf05e1def87b7 100644 (file)
@@ -320,6 +320,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
        linkmode_zero(state->lp_advertising);
        state->interface = pl->link_config.interface;
        state->an_enabled = pl->link_config.an_enabled;
+       state->speed = SPEED_UNKNOWN;
+       state->duplex = DUPLEX_UNKNOWN;
+       state->pause = MLO_PAUSE_NONE;
+       state->an_complete = 0;
        state->link = 1;
 
        return pl->ops->mac_link_state(ndev, state);
index c6010fb1aa0f2049788d1991135a944606438372..cb4a23041a94a80ed8a2b6dcc50730beee22bd64 100644 (file)
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = {
                .name           = "RTL8366RB Gigabit Ethernet",
                .features       = PHY_GBIT_FEATURES,
                .config_init    = &rtl8366rb_config_init,
+               /* These interrupts are handled by the irq controller
+                * embedded inside the RTL8366RB; they get unmasked when the
+                * irq is requested and ACKed by reading the status register,
+                * which is done by the irqchip code.
+                */
+               .ack_interrupt  = genphy_no_ack_interrupt,
+               .config_intr    = genphy_no_config_intr,
                .suspend        = genphy_suspend,
                .resume         = genphy_resume,
        },
index 74a8782313cf5b0319a7f3bad936926c4f4481da..bd6084e315de282b9f92558864cc392727898d7b 100644 (file)
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
        u16 val = 0;
        int err;
 
-       err = priv->phy_drv->read_status(phydev);
+       if (priv->phy_drv->read_status)
+               err = priv->phy_drv->read_status(phydev);
+       else
+               err = genphy_read_status(phydev);
        if (err < 0)
                return err;
 
index 958f1cf67282d46fbc379930a333d2d47a295709..6ce3f666d142a801f195451e33f6eab3d1a2f4c6 100644 (file)
@@ -1256,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
        list_add_tail_rcu(&port->list, &team->port_list);
        team_port_enable(team, port);
        __team_compute_features(team);
-       __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+       __team_port_change_port_added(port, !!netif_oper_up(port_dev));
        __team_options_change_check(team);
 
        netdev_info(dev, "Port device %s added\n", portname);
@@ -2915,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused,
 
        switch (event) {
        case NETDEV_UP:
-               if (netif_carrier_ok(dev))
+               if (netif_oper_up(dev))
                        team_port_change_check(port, true);
                break;
        case NETDEV_DOWN:
index fed298c0cb393e126243c84b44006b94192a1c5e..53f4f37b0ffd3e92bc742b0ee42948c5bdab0b1e 100644 (file)
@@ -2167,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
        }
 
        add_wait_queue(&tfile->wq.wait, &wait);
-       current->state = TASK_INTERRUPTIBLE;
 
        while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
                ptr = ptr_ring_consume(&tfile->tx_ring);
                if (ptr)
                        break;
@@ -2185,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
                schedule();
        }
 
-       current->state = TASK_RUNNING;
+       __set_current_state(TASK_RUNNING);
        remove_wait_queue(&tfile->wq.wait, &wait);
 
 out:
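The old tun code set current->state once before the loop, but schedule() returns with the task RUNNING, so a second pass could sleep on a ring check without ever being re-armed for a wakeup (a classic lost-wakeup bug). Moving set_current_state(TASK_INTERRUPTIBLE) inside the loop re-arms the state before each condition check. The canonical shape of the pattern, as a sketch:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* canonical wait loop: re-arm task state before every condition check */
static void *wait_for_item(wait_queue_head_t *wq, void *(*try_get)(void))
{
        DECLARE_WAITQUEUE(wait, current);
        void *item;

        add_wait_queue(wq, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                item = try_get();               /* check after arming */
                if (item || signal_pending(current))
                        break;
                schedule();                     /* returns TASK_RUNNING */
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(wq, &wait);
        return item;
}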
index 735ad838e2ba86ab689c549d4a89e5ca54fecd95..18af2f8eee96a87418dd93005600e661a93a818a 100644 (file)
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 19)},   /* Sierra Wireless MC7710 in QMI mode */
-       {QMI_FIXED_INTF(0x1199, 0x68c0, 8)},    /* Sierra Wireless MC7304/MC7354 */
-       {QMI_FIXED_INTF(0x1199, 0x68c0, 10)},   /* Sierra Wireless MC7304/MC7354 */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
        {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
index 60dd1ec1665f992ea9b50551672d4a85a5b6b670..86c8c64fbb0f33e920a5fbe77e2d73fe69c14991 100644 (file)
@@ -557,6 +557,7 @@ enum spd_duplex {
 /* MAC PASSTHRU */
 #define AD_MASK                        0xfee0
 #define BND_MASK               0x0004
+#define BD_MASK                        0x0001
 #define EFUSE                  0xcfdb
 #define PASS_THRU_MASK         0x1
 
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
                        return -ENODEV;
                }
        } else {
-               /* test for RTL8153-BND */
+               /* test for RTL8153-BND and RTL8153-BD */
                ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
-               if ((ocp_data & BND_MASK) == 0) {
+               if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) {
                        netif_dbg(tp, probe, tp->netdev,
                                  "Invalid variant for MAC pass through\n");
                        return -ENODEV;
index 95909e262ba4369fa853acf24c62bf271670d4ad..7c1430ed02445b6e6f13c663b555ef550276c899 100644 (file)
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev)
 
        /* default to no qdisc; user can add if desired */
        dev->priv_flags |= IFF_NO_QUEUE;
+
+       dev->min_mtu = 0;
+       dev->max_mtu = 0;
 }
 
 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
index 320edcac469985308ea96a7883dcdf86367d83ae..6359053bd0c783e0a85c0954ebec52717a288245 100644 (file)
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        goto out_err;
                }
 
-               genlmsg_reply(skb, info);
+               res = genlmsg_reply(skb, info);
                break;
        }
 
index 0e6b43bb4678a9cd01d7883f5c6c37cf8006a9cb..a5ea3ba495a4d601bdbb71bcc47ef436f31a60b4 100644 (file)
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = {
        .get_txpower = mt76x02_get_txpower,
 };
 
-static int mt76x0u_register_device(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
 {
-       struct ieee80211_hw *hw = dev->mt76.hw;
        int err;
 
-       err = mt76u_alloc_queues(&dev->mt76);
-       if (err < 0)
-               goto out_err;
-
-       err = mt76u_mcu_init_rx(&dev->mt76);
-       if (err < 0)
-               goto out_err;
-
        mt76x0_chip_onoff(dev, true, true);
-       if (!mt76x02_wait_for_mac(&dev->mt76)) {
-               err = -ETIMEDOUT;
-               goto out_err;
-       }
+
+       if (!mt76x02_wait_for_mac(&dev->mt76))
+               return -ETIMEDOUT;
 
        err = mt76x0u_mcu_init(dev);
        if (err < 0)
-               goto out_err;
+               return err;
 
        mt76x0_init_usb_dma(dev);
        err = mt76x0_init_hardware(dev);
        if (err < 0)
-               goto out_err;
+               return err;
 
        mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
        mt76_wr(dev, MT_TXOP_CTRL_CFG,
                FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
                FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
 
+       return 0;
+}
+
+static int mt76x0u_register_device(struct mt76x02_dev *dev)
+{
+       struct ieee80211_hw *hw = dev->mt76.hw;
+       int err;
+
+       err = mt76u_alloc_queues(&dev->mt76);
+       if (err < 0)
+               goto out_err;
+
+       err = mt76u_mcu_init_rx(&dev->mt76);
+       if (err < 0)
+               goto out_err;
+
+       err = mt76x0u_init_hardware(dev);
+       if (err < 0)
+               goto out_err;
+
        err = mt76x0_register_device(dev);
        if (err < 0)
                goto out_err;
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
 
        mt76u_stop_queues(&dev->mt76);
        mt76x0u_mac_stop(dev);
+       clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+       mt76x0_chip_onoff(dev, false, false);
        usb_kill_urb(usb->mcu.res.urb);
 
        return 0;
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
        tasklet_enable(&usb->rx_tasklet);
        tasklet_enable(&usb->tx_tasklet);
 
-       ret = mt76x0_init_hardware(dev);
+       ret = mt76x0u_init_hardware(dev);
        if (ret)
                goto err;
 
index 0ccb021f1e78687d7c7a9814a05369aafe7c6508..10d580c3dea3f06b53d4bdbff22f693a237e741d 100644 (file)
@@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif)
        if (xenvif_hash_cache_size == 0)
                return;
 
+       BUG_ON(vif->hash.cache.count);
+
        spin_lock_init(&vif->hash.cache.lock);
        INIT_LIST_HEAD(&vif->hash.cache.list);
 }
index 182d6770f1027120b5cd7165e4b7eea7c8297299..6da12518e6934ce20ec6a1becfdbbef07f430822 100644 (file)
@@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;
+       unsigned int num_queues;
+
+       /* If queues are not set up internally - always return 0
+        * as the packet is going to be dropped anyway */
+       num_queues = READ_ONCE(vif->num_queues);
+       if (num_queues < 1)
+               return 0;
 
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
index 80aae3a32c2a3ad90d0ca8c98a37e666de1bd4a6..f09948b009dd00ca79670daf7da5e27028cc047a 100644 (file)
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
                skb_frag_size_set(&frags[i], len);
        }
 
-       /* Copied all the bits from the frag list -- free it. */
-       skb_frag_list_init(skb);
-       xenvif_skb_zerocopy_prepare(queue, nskb);
-       kfree_skb(nskb);
-
        /* Release all the original (foreign) frags. */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                xenvif_fill_frags(queue, skb);
 
                if (unlikely(skb_has_frag_list(skb))) {
+                       struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+                       xenvif_skb_zerocopy_prepare(queue, nskb);
                        if (xenvif_handle_frag_list(queue, skb)) {
                                if (net_ratelimit())
                                        netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                                kfree_skb(skb);
                                continue;
                        }
+                       /* Copied all the bits from the frag list -- free it. */
+                       skb_frag_list_init(skb);
+                       kfree_skb(nskb);
                }
 
                skb->dev      = queue->vif->dev;
index c69ca95b1ad571368c89ccb8146aaa5b83a7c343..0f140a8021375a5d829f94d35c7145f75126a696 100644 (file)
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = {
 
 static const char * const sdxc_a_groups[] = {
        "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
-       "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+       "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
 };
 
 static const char * const pcm_a_groups[] = {
index 7aae52a09ff03e48ba974014e42c6a5ab3ef0a0a..4ffd56ff809eb143437830de1aa829d4ab431708 100644 (file)
@@ -79,7 +79,7 @@ enum {
                .intr_cfg_reg = 0,                      \
                .intr_status_reg = 0,                   \
                .intr_target_reg = 0,                   \
-               .tile = NORTH,                          \
+               .tile = SOUTH,                          \
                .mux_bit = -1,                          \
                .pull_bit = pull,                       \
                .drv_bit = drv,                         \
index a3c20e3a8b7cb967f8eefa16097214f42bebfa34..3337b1e80412b045b3609eb794e6d22da784c271 100644 (file)
@@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        struct Scsi_Host *host = NULL;
        TW_Device_Extension *tw_dev;
        unsigned long mem_addr, mem_len;
-       int retval = -ENODEV;
+       int retval;
 
        retval = pci_enable_device(pdev);
        if (retval) {
@@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
                retval = -ENODEV;
                goto out_disable_device;
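This hunk and the near-identical ones that follow all replace the same anti-pattern: with ||, a successful 64-bit dma_set_mask_and_coherent() call still evaluates (and applies) the 32-bit mask, while a failed one short-circuits to the error path without ever trying the fallback, and either way the errno is lost. The repeated fix tries 64-bit DMA, falls back to 32-bit, and propagates the real error; condensed into one sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* try 64-bit DMA first, fall back to 32-bit, keep the real errno */
static int set_dma_masks(struct pci_dev *pdev)
{
        int ret;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                dev_err(&pdev->dev, "no suitable DMA mask available\n");
        return ret;     /* 0 on success, negative errno otherwise */
}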
@@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
                retval = -ENODEV;
                goto out_disable_device;
index cd096104bcec18d2f25eea1e64effe560ea38c29..dda6fa8577093d6302f35fbd9fd7a9c51eeed115 100644 (file)
@@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
                retval = -ENODEV;
                goto out_disable_device;
@@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (retval)
+               retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval) {
                TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
                retval = -ENODEV;
                goto out_disable_device;
index 07efcb9b5b943109bea84daf60fc223263b8d376..bbdae67774f064526cd7e4eafcc6a8c7aeab35c3 100644 (file)
@@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (err)
                goto Err_remove;
 
-       err = -ENODEV;
-       if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
+       err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64));
+       if (err)
+               err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+       if (err) {
+               err = -ENODEV;
                asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
                goto Err_remove;
        }
index 42a0caf6740da3fd0af1e933e79616e30460a37e..88880a66a18929f5c039ace1999c531a7a3e396d 100644 (file)
@@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad)
 int
 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 {
-       int             rc = -ENODEV;
+       int rc = -ENODEV;
 
        if (pci_enable_device(pdev)) {
                printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
@@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
 
        pci_set_master(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+       if (rc) {
+               rc = -ENODEV;
                printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
                goto out_release_region;
        }
@@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
 {
        struct bfad_s *bfad = pci_get_drvdata(pdev);
        u8 byte;
+       int rc;
 
        dev_printk(KERN_ERR, &pdev->dev,
                   "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags);
@@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
        pci_save_state(pdev);
        pci_set_master(pdev);
 
-       if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32)))
+       rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
+                                              DMA_BIT_MASK(32));
+       if (rc)
                goto out_disable_device;
 
        if (restart_bfa(bfad) == -1)
index cf629380a9813a41d20486f33d17b9ff5490c81a..616b25bf7941b1b6afa080c30f9a195d40986d44 100644 (file)
@@ -210,8 +210,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rv)
+               rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rv) {
+               rv = -ENODEV;
                dev_err(&pdev->dev, "No suitable DMA available.\n");
                goto err_release_regions;
        }
index eed7fc5b3389f8291c7f04d3b425f0944c580228..bc17fa0d837555af82f04bee2b7bad6372caf8a3 100644 (file)
@@ -2323,6 +2323,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
+       int error;
 
        shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
        if (!shost) {
@@ -2343,8 +2344,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
        if (hisi_sas_get_fw_info(hisi_hba) < 0)
                goto err_out;
 
-       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
-           dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+       error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       if (error)
+               error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+       if (error) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }
index c92b3822c40823fa283711746a8f4da9a527f539..e0570fd8466ed865030d5b509926d06cf1ec3594 100644 (file)
@@ -2447,10 +2447,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (rc)
                goto err_out_disable_device;
 
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc) {
                dev_err(dev, "No usable DMA addressing method\n");
-               rc = -EIO;
+               rc = -ENODEV;
                goto err_out_regions;
        }
 
index 3eedfd4f8f5790e6e658741abc76efa987bb34f0..251c084a6ff04d677e16db4245331d6265305cf2 100644 (file)
@@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;
+       int rc;
 
        dprintk("hptiop_probe(%p)\n", pcidev);
 
@@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
 
        /* Enable 64bit DMA if possible */
        iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
-       if (dma_set_mask(&pcidev->dev,
-                        DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) ||
-           dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) {
+       rc = dma_set_mask(&pcidev->dev,
+                         DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
+       if (rc)
+               rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
+
+       if (rc) {
                printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                goto disable_pci_device;
        }
index b8d325ce8754b4c2b66f275ab5cdba06e73b2e6e..120fc520f27a3b4bc1b9e6bd0642b139ad88b39d 100644 (file)
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
        if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
                return -ENODATA;
 
+       spin_lock_bh(&conn->session->back_lock);
+       if (conn->task == NULL) {
+               spin_unlock_bh(&conn->session->back_lock);
+               return -ENODATA;
+       }
        __iscsi_get_task(task);
+       spin_unlock_bh(&conn->session->back_lock);
        spin_unlock_bh(&conn->session->frwd_lock);
        rc = conn->session->tt->xmit_task(task);
        spin_lock_bh(&conn->session->frwd_lock);
index 17eb4185f29de5b65a78ba3c344304666b5663e0..f21c93bbb35c7e8d6f878dba523a9c53491080a4 100644 (file)
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev(
                rphy = sas_end_device_alloc(phy->port);
                if (!rphy)
                        goto out_free;
+               rphy->identify.phy_identifier = phy_id;
 
                child->rphy = rphy;
                get_device(&rphy->dev);
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev(
 
                child->rphy = rphy;
                get_device(&rphy->dev);
+               rphy->identify.phy_identifier = phy_id;
                sas_fill_in_rphy(child, rphy);
 
                list_add_tail(&child->disco_list_node, &parent->port->disco_list);
index bede11e163499641716c664e56d9e8a32d7de849..e1129260ed18175da0a944d4e257e2b9a684e036 100644 (file)
@@ -7361,15 +7361,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
        unsigned long bar0map_len, bar2map_len;
        int i, hbq_count;
        void *ptr;
-       int error = -ENODEV;
+       int error;
 
        if (!pdev)
-               return error;
+               return -ENODEV;
 
        /* Set the device DMA mask size */
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+       error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (error)
+               error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (error)
                return error;
+       error = -ENODEV;
 
        /* Get the bus address of Bar0 and Bar2 and the number of bytes
         * required by each mapping.
@@ -9742,11 +9745,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
        uint32_t if_type;
 
        if (!pdev)
-               return error;
+               return -ENODEV;
 
        /* Set the device DMA mask size */
-       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
-           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+       error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (error)
+               error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (error)
                return error;
 
        /*
index 6d65ac584eba0178846b3b0fc6526f6c10ec8863..a6828391d6b3777873782221c98885c4b8f41885 100644 (file)
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
                set_host_byte(cmd, DID_OK);
                return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
+               set_host_byte(cmd, DID_OK);
                return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
@@ -2597,7 +2598,6 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
-       WARN_ON_ONCE(!sdev->quiesced_by);
        sdev->quiesced_by = NULL;
        blk_clear_pm_only(sdev->request_queue);
        if (sdev->sdev_state == SDEV_QUIESCE)
index fff86940388bab1315c000b241a5456b297cd418..a340af797a850d68010ebb4b600318122fa0227d 100644 (file)
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
                return -EOPNOTSUPP;
 
        /*
-        * Get a reply buffer for the number of requested zones plus a header.
-        * For ATA, buffers must be aligned to 512B.
+        * Get a reply buffer for the number of requested zones plus a header,
+        * without exceeding the device maximum command size. For ATA disks,
+        * buffers must be aligned to 512B.
         */
-       buflen = roundup((nrz + 1) * 64, 512);
+       buflen = min(queue_max_hw_sectors(disk->queue) << 9,
+                    roundup((nrz + 1) * 64, 512));
        buf = kmalloc(buflen, gfp_mask);
        if (!buf)
                return -ENOMEM;
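
The arithmetic behind the new bound: a REPORT ZONES reply carries a 64-byte header plus one 64-byte descriptor per zone, hence (nrz + 1) * 64 bytes, rounded up to 512 for ATA, and now clamped to the largest command the device accepts (max_hw_sectors is in 512-byte sectors, hence the << 9). A quick sketch with made-up values:

    #include <stdio.h>

    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))
    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned int nrz = 16;              /* zones requested (example) */
            unsigned int max_hw_sectors = 128;  /* hypothetical queue limit  */

            /* 64 B header + 64 B per zone, 512 B aligned for ATA, capped
             * at the largest command the device accepts. */
            unsigned int buflen = MIN(max_hw_sectors << 9,           /* 65536 */
                                      ROUNDUP((nrz + 1) * 64, 512)); /*  1536 */

            printf("buflen = %u\n", buflen);    /* 1536 */
            return 0;
    }
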
index e5efce3c08e2eee08c3a3bc6fb330da07df5c4b8..947f9b28de9ed1b2f45542aec6cdf24367df3ff8 100644 (file)
@@ -699,8 +699,10 @@ static int __init optee_driver_init(void)
                return -ENODEV;
 
        np = of_find_matching_node(fw_np, optee_match);
-       if (!np || !of_device_is_available(np))
+       if (!np || !of_device_is_available(np)) {
+               of_node_put(np);
                return -ENODEV;
+       }
 
        optee = optee_probe(np);
        of_node_put(np);
index 24a129fcdd61b38bd8bb5045d22dd4ca0884a59f..a2e5dc7716e21c7cbed163159a0c1262df977227 100644 (file)
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
 
        ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
                             len, iov, 64, VHOST_ACCESS_WO);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        for (i = 0; i < ret; i++) {
index cf445dbd5f2e05d4c716dadb3123fb397537d4e6..9de46116c7492a712ede0c8a241eea71ef94230b 100644 (file)
@@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
 
                rcu_assign_pointer(cell->vl_servers, vllist);
                cell->dns_expiry = TIME64_MAX;
+               __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags);
        } else {
                cell->dns_expiry = ktime_get_real_seconds();
        }
index 7cde3f46ad263ab084aafaefa14161902f33d4f1..e996174cbfc026d3d3471cb01a75b437f5b46c1b 100644 (file)
 #include <linux/err.h>
 #include <linux/fs.h>
 
+static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
+static inline char *next_non_spacetab(char *first, const char *last)
+{
+       for (; first <= last; first++)
+               if (!spacetab(*first))
+                       return first;
+       return NULL;
+}
+static inline char *next_terminator(char *first, const char *last)
+{
+       for (; first <= last; first++)
+               if (spacetab(*first) || !*first)
+                       return first;
+       return NULL;
+}
+
 static int load_script(struct linux_binprm *bprm)
 {
        const char *i_arg, *i_name;
-       char *cp;
+       char *cp, *buf_end;
        struct file *file;
        int retval;
 
+       /* Not ours to exec if we don't start with "#!". */
        if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
                return -ENOEXEC;
 
@@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm)
        if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
                return -ENOENT;
 
-       /*
-        * This section does the #! interpretation.
-        * Sorta complicated, but hopefully it will work.  -TYT
-        */
-
+       /* Release since we are not mapping a binary into memory. */
        allow_write_access(bprm->file);
        fput(bprm->file);
        bprm->file = NULL;
 
-       bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
-       if ((cp = strchr(bprm->buf, '\n')) == NULL)
-               cp = bprm->buf+BINPRM_BUF_SIZE-1;
+       /*
+        * This section handles parsing the #! line into separate
+        * interpreter path and argument strings. We must be careful
+        * because bprm->buf is not yet guaranteed to be NUL-terminated
+        * (though the buffer will have trailing NUL padding when the
+        * file size was smaller than the buffer size).
+        *
+        * We do not want to exec a truncated interpreter path, so either
+        * we find a newline (which indicates nothing is truncated), or
+        * we find a space/tab/NUL after the interpreter path (which
+        * itself may be preceded by spaces/tabs). Truncating the
+        * arguments is fine: the interpreter can re-read the script to
+        * parse them on its own.
+        */
+       buf_end = bprm->buf + sizeof(bprm->buf) - 1;
+       cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+       if (!cp) {
+               cp = next_non_spacetab(bprm->buf + 2, buf_end);
+               if (!cp)
+                       return -ENOEXEC; /* Entire buf is spaces/tabs */
+               /*
+                * If there is no later space/tab/NUL we must assume the
+                * interpreter path is truncated.
+                */
+               if (!next_terminator(cp, buf_end))
+                       return -ENOEXEC;
+               cp = buf_end;
+       }
+       /* NUL-terminate the buffer and any trailing spaces/tabs. */
        *cp = '\0';
        while (cp > bprm->buf) {
                cp--;
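
The rewritten scan is easy to exercise outside the kernel. Below is a self-contained user-space sketch of the same logic; BUF_SIZE is shrunk to 16 bytes so truncation is easy to trigger, and hashbang_ok() is an invented wrapper for the two -ENOEXEC checks above:

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 16     /* stand-in for BINPRM_BUF_SIZE, shrunk for the demo */

    static int spacetab(char c) { return c == ' ' || c == '\t'; }

    static char *next_non_spacetab(char *first, const char *last)
    {
            for (; first <= last; first++)
                    if (!spacetab(*first))
                            return first;
            return NULL;
    }

    static char *next_terminator(char *first, const char *last)
    {
            for (; first <= last; first++)
                    if (spacetab(*first) || !*first)
                            return first;
            return NULL;
    }

    /* 1 if the #! line is safe to parse, 0 if the interpreter path may
     * be truncated -- the two -ENOEXEC cases in the hunk above. */
    static int hashbang_ok(char *buf)
    {
            char *buf_end = buf + BUF_SIZE - 1;
            char *cp = memchr(buf, '\n', BUF_SIZE);     /* ~strnchr() */

            if (cp)
                    return 1;       /* newline seen: nothing truncated */
            cp = next_non_spacetab(buf + 2, buf_end);
            if (!cp)
                    return 0;       /* nothing but spaces/tabs after #! */
            return next_terminator(cp, buf_end) != NULL;
    }

    int main(void)
    {
            char ok[BUF_SIZE] = "#!/bin/sh\n";
            char bad[BUF_SIZE];

            memset(bad, 'x', sizeof(bad));  /* a path that runs off the end */
            memcpy(bad, "#!/", 3);

            printf("%d %d\n", hashbang_ok(ok), hashbang_ok(bad));   /* 1 0 */
            return 0;
    }
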
index 041c27ea8de155a0002bdb5af25eb2fc5f8e6efa..f74193da0e092b09254fec756bde39f5bfed649d 100644 (file)
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
             capsnap->size);
 
        spin_lock(&mdsc->snap_flush_lock);
-       list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+       if (list_empty(&ci->i_snap_flush_item))
+               list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
        spin_unlock(&mdsc->snap_flush_lock);
        return 1;  /* caller may want to ceph_flush_snaps */
 }
index 32920a10100e23fc60f53cf36c882278ae972cee..a7fa037b876b7fa32afcc891345cd44f0315b4f9 100644 (file)
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
+
+       /*
+        * page_private is subpool pointer in hugetlb pages.  Transfer to
+        * new page.  PagePrivate is not associated with page_private for
+        * hugetlb pages and cannot be set here as only page_huge_active
+        * pages can be migrated.
+        */
+       if (page_private(page)) {
+               set_page_private(newpage, page_private(page));
+               set_page_private(page, 0);
+       }
+
        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
index a677b59efd74e25f52fd3db3e610c6c5193fb171..678ef175d63ae7f4b81efc595e4dd82b2c4b8741 100644 (file)
@@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
        if (!access_ok(from, n))
                return n;
 
-       current->kernel_uaccess_faults_ok++;
        while (n) {
                if (__get_user(c, f)) {
                        memset(t, 0, n);
@@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from,
                f++;
                n--;
        }
-       current->kernel_uaccess_faults_ok--;
        return n;
 }
 
index 3f23b6840547e71ebd4c228c15412ad62dfab600..bf34ddaa2ad741e1c4d7cc7836f3851483a973bb 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/keyctl.h>
 #include <linux/key-type.h>
 #include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
 #include <linux/module.h>
 
 #include "internal.h"
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy;
 struct idmap_legacy_upcalldata {
        struct rpc_pipe_msg pipe_msg;
        struct idmap_msg idmap_msg;
-       struct key_construction *key_cons;
+       struct key      *authkey;
        struct idmap *idmap;
 };
 
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = {
        { Opt_find_err, NULL }
 };
 
-static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
+static int nfs_idmap_legacy_upcall(struct key *, void *);
 static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
                                   size_t);
 static void idmap_release_pipe(struct inode *);
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
 static void
 nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
 {
-       struct key_construction *cons = idmap->idmap_upcall_data->key_cons;
+       struct key *authkey = idmap->idmap_upcall_data->authkey;
 
        kfree(idmap->idmap_upcall_data);
        idmap->idmap_upcall_data = NULL;
-       complete_request_key(cons, ret);
+       complete_request_key(authkey, ret);
+       key_put(authkey);
 }
 
 static void
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
                nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
 }
 
-static int nfs_idmap_legacy_upcall(struct key_construction *cons,
-                                  const char *op,
-                                  void *aux)
+static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux)
 {
        struct idmap_legacy_upcalldata *data;
+       struct request_key_auth *rka = get_request_key_auth(authkey);
        struct rpc_pipe_msg *msg;
        struct idmap_msg *im;
        struct idmap *idmap = (struct idmap *)aux;
-       struct key *key = cons->key;
+       struct key *key = rka->target_key;
        int ret = -ENOKEY;
 
        if (!aux)
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
        msg = &data->pipe_msg;
        im = &data->idmap_msg;
        data->idmap = idmap;
-       data->key_cons = cons;
+       data->authkey = key_get(authkey);
 
        ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
        if (ret < 0)
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
 out2:
        kfree(data);
 out1:
-       complete_request_key(cons, ret);
+       complete_request_key(authkey, ret);
        return ret;
 }
 
@@ -651,9 +652,10 @@ out:
 static ssize_t
 idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 {
+       struct request_key_auth *rka;
        struct rpc_inode *rpci = RPC_I(file_inode(filp));
        struct idmap *idmap = (struct idmap *)rpci->private;
-       struct key_construction *cons;
+       struct key *authkey;
        struct idmap_msg im;
        size_t namelen_in;
        int ret = -ENOKEY;
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        if (idmap->idmap_upcall_data == NULL)
                goto out_noupcall;
 
-       cons = idmap->idmap_upcall_data->key_cons;
+       authkey = idmap->idmap_upcall_data->authkey;
+       rka = get_request_key_auth(authkey);
 
        if (mlen != sizeof(im)) {
                ret = -ENOSPC;
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 
        ret = nfs_idmap_read_and_verify_message(&im,
                        &idmap->idmap_upcall_data->idmap_msg,
-                       cons->key, cons->authkey);
+                       rka->target_key, authkey);
        if (ret >= 0) {
-               key_set_timeout(cons->key, nfs_idmap_cache_timeout);
+               key_set_timeout(rka->target_key, nfs_idmap_cache_timeout);
                ret = mlen;
        }
 
index a5a2fe76568fbbfe3e4c720979457291d5d7dcdb..b094d3d79354af2196285404d5093cc02e169046 100644 (file)
@@ -398,8 +398,6 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter
        loff_t pos = iocb->ki_pos;
        ssize_t rc = 0;
 
-       BUG_ON(iocb->private);
-
        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
 
        orangefs_stats.reads++;
@@ -416,8 +414,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
        loff_t pos;
        ssize_t rc;
 
-       BUG_ON(iocb->private);
-
        gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
 
        inode_lock(file->f_mapping->host);
index 633a6346257346595a71843092fb79efd71c14f8..f5ed9512d193a2d93df6fc54cd2202ad3367fb41 100644 (file)
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 
                        task_lock(p);
                        if (!p->vfork_done && process_shares_mm(p, mm)) {
-                               pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
-                                               task_pid_nr(p), p->comm,
-                                               p->signal->oom_score_adj, oom_adj,
-                                               task_pid_nr(task), task->comm);
                                p->signal->oom_score_adj = oom_adj;
                                if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
                                        p->signal->oom_score_adj_min = (short)oom_adj;
index ca46a45a9cce2ae7f4638ed5c5794a71baf6d104..570f9d03b2eb37363a49c9ed13896df195082b96 100644 (file)
@@ -767,7 +767,7 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
  *
  * Returns true if the @feature is supported, false otherwise.
  */
-static inline bool drm_core_check_feature(struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
 {
        return dev->driver->driver_features & dev->driver_features & feature;
 }
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h
new file mode 100644 (file)
index 0000000..a726dd3
--- /dev/null
@@ -0,0 +1,36 @@
+/* request_key authorisation token key type
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H
+#define _KEYS_REQUEST_KEY_AUTH_TYPE_H
+
+#include <linux/key.h>
+
+/*
+ * Authorisation record for request_key().
+ */
+struct request_key_auth {
+       struct key              *target_key;
+       struct key              *dest_keyring;
+       const struct cred       *cred;
+       void                    *callout_info;
+       size_t                  callout_len;
+       pid_t                   pid;
+       char                    op[8];
+} __randomize_layout;
+
+static inline struct request_key_auth *get_request_key_auth(const struct key *key)
+{
+       return key->payload.data[0];
+}
+
+
+#endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */
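
With this header in place, a request_key actor under the new signature (see the include/linux/key.h hunk below) recovers the key under construction from the authorisation key instead of receiving a key_construction pair. A hypothetical actor, mirroring the nfs_idmap_legacy_upcall() conversion above, would look roughly like:

    #include <keys/request_key_auth-type.h>
    #include <linux/key.h>

    /* Hypothetical actor, not from the patch: the authorisation key
     * comes in directly, and the key under construction is recovered
     * from it via get_request_key_auth(). */
    static int example_request_key_actor(struct key *authkey, void *aux)
    {
            struct request_key_auth *rka = get_request_key_auth(authkey);
            struct key *key = rka->target_key;      /* formerly cons->key */

            /* ... upcall to user space to instantiate 'key' ... */

            complete_request_key(authkey, 0);       /* formerly took 'cons' */
            return 0;
    }
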
index e098cbe27db5462f155a3a9c64169d3c68a549c5..12babe9915944f84a040199451124763d15d4bd0 100644 (file)
@@ -31,7 +31,7 @@
 struct user_key_payload {
        struct rcu_head rcu;            /* RCU destructor */
        unsigned short  datalen;        /* length of this data */
-       char            data[0];        /* actual data */
+       char            data[0] __aligned(__alignof__(u64)); /* actual data */
 };
 
 extern struct key_type key_type_user;
index bc9af551fc83821e5bec98e5cbc582b2fe0be07a..e49d1de0614eb521318abce6e6b72f3a47a09dfa 100644 (file)
 struct kernel_pkey_query;
 struct kernel_pkey_params;
 
-/*
- * key under-construction record
- * - passed to the request_key actor if supplied
- */
-struct key_construction {
-       struct key      *key;   /* key being constructed */
-       struct key      *authkey;/* authorisation for key being constructed */
-};
-
 /*
  * Pre-parsed payload, used by key add, update and instantiate.
  *
@@ -50,8 +41,7 @@ struct key_preparsed_payload {
        time64_t        expiry;         /* Expiry time of key */
 } __randomize_layout;
 
-typedef int (*request_key_actor_t)(struct key_construction *key,
-                                  const char *op, void *aux);
+typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);
 
 /*
  * Preparsed matching criterion.
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key,
                                    const void *data,
                                    size_t datalen,
                                    struct key *keyring,
-                                   struct key *instkey);
+                                   struct key *authkey);
 extern int key_reject_and_link(struct key *key,
                               unsigned timeout,
                               unsigned error,
                               struct key *keyring,
-                              struct key *instkey);
-extern void complete_request_key(struct key_construction *cons, int error);
+                              struct key *authkey);
+extern void complete_request_key(struct key *authkey, int error);
 
 static inline int key_negate_and_link(struct key *key,
                                      unsigned timeout,
                                      struct key *keyring,
-                                     struct key *instkey)
+                                     struct key *authkey)
 {
-       return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+       return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
 }
 
 extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
index 2b2a6dce16301d4d9b683dad7503b383d8708500..4c76fe2c84880fa24bebc7826d19801a10466bfd 100644 (file)
@@ -11,6 +11,8 @@
 #define _LINUX_NETDEV_FEATURES_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
 
 typedef u64 netdev_features_t;
 
@@ -154,8 +156,26 @@ enum {
 #define NETIF_F_HW_TLS_TX      __NETIF_F(HW_TLS_TX)
 #define NETIF_F_HW_TLS_RX      __NETIF_F(HW_TLS_RX)
 
-#define for_each_netdev_feature(mask_addr, bit)        \
-       for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Find the highest-numbered set feature bit in the range start down to 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+       /* like BITMAP_LAST_WORD_MASK() for u64
+        * this sets the most significant (64 - start) bits to 0.
+        */
+       feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+       return fls64(feature) - 1;
+}
+
+/* This walks the set feature bits from the MSB to the LSB;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit)                                \
+       for ((bit) = find_next_netdev_feature((mask_addr),              \
+                                             NETDEV_FEATURE_COUNT);    \
+            (bit) >= 0;                                                \
+            (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
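
The helper's contract in isolation: the mask keeps only the bits below start, so the result is the highest set feature bit strictly below it (or -1 when none remain). A user-space check, emulating fls64() with a compiler builtin (an assumption of a GCC/Clang build):

    #include <stdio.h>
    #include <stdint.h>

    /* fls64() analogue: 1-based index of the most significant set bit,
     * 0 for no bits set. */
    static int fls64_demo(uint64_t x)
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            uint64_t feature = (1ULL << 40) | (1ULL << 7);
            unsigned long start = 40;

            /* Like BITMAP_LAST_WORD_MASK(): keep only bits below 'start'. */
            feature &= ~0ULL >> (-start & 63);

            printf("%d\n", fls64_demo(feature) - 1);   /* 7 */
            return 0;
    }
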
index 86dbb3e29139591d046602003cd7cb115f8b7e1a..848b54b7ec9146eac22265d56de2c44de2671d4b 100644 (file)
@@ -3861,7 +3861,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
        if (debug_value == 0)   /* no output */
                return 0;
        /* set low N bits */
-       return (1 << debug_value) - 1;
+       return (1U << debug_value) - 1;
 }
 
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
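
The 1U in netif_msg_init() is not cosmetic: for debug_value == 31, the signed 1 << 31 shifts into the sign bit of an int, which is undefined behaviour; the unsigned shift is well defined for shifts of 0..31. A two-line illustration:

    #include <stdio.h>

    int main(void)
    {
            int debug_value = 31;

            /* (1 << 31) shifts into the sign bit of a signed int:
             * undefined behaviour. The unsigned form is well defined. */
            unsigned int mask = (1U << debug_value) - 1;

            printf("%#x\n", mask);          /* 0x7fffffff */
            return 0;
    }
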
index 127fcc9c37781564d72978ad0626e5d4c19cf740..333b56d8f746368623ae738b554ea8c37b54f503 100644 (file)
@@ -992,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev)
 {
        return 0;
 }
+static inline int genphy_no_ack_interrupt(struct phy_device *phydev)
+{
+       return 0;
+}
+static inline int genphy_no_config_intr(struct phy_device *phydev)
+{
+       return 0;
+}
 int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
                                u16 regnum);
 int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
index bba3afb4e9bf8a782200246ebe8d019be5c7b700..f9b43c989577fdf50da7147ee28a7b0a8c4e24ad 100644 (file)
@@ -739,12 +739,6 @@ struct task_struct {
        unsigned                        use_memdelay:1;
 #endif
 
-       /*
-        * May usercopy functions fault on kernel addresses?
-        * This is not just a single bit because this can potentially nest.
-        */
-       unsigned int                    kernel_uaccess_faults_ok;
-
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
        struct restart_block            restart_block;
index 95d25b010a257f2fbb385f8fe8df719ac6d357f4..bdb9563c64a016e1e99c58c65b410a1aea0f0fc9 100644 (file)
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
 
        if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
                skb_set_transport_header(skb, keys.control.thoff);
-       else
+       else if (offset_hint >= 0)
                skb_set_transport_header(skb, offset_hint);
 }
 
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
 }
 
+static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
+{
+       return skb_is_gso(skb) &&
+              skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
+}
+
 static inline void skb_gso_reset(struct sk_buff *skb)
 {
        skb_shinfo(skb)->gso_size = 0;
index cb462f9ab7dd592bc1d613c86aefeb787cdd9321..e0348cb0a1dd7d2f3320e58a7ec1cc76e4a8799b 100644 (file)
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
+       } else {
+               /* gso packets without NEEDS_CSUM do not set transport_offset;
+                * probe, and drop the packet if it does not match one of the above types.
+                */
+               if (gso_type && skb->network_header) {
+                       if (!skb->protocol)
+                               virtio_net_hdr_set_proto(skb, hdr);
+retry:
+                       skb_probe_transport_header(skb, -1);
+                       if (!skb_transport_header_was_set(skb)) {
+                               /* UFO does not specify ipv4 or 6: try both */
+                               if (gso_type & SKB_GSO_UDP &&
+                                   skb->protocol == htons(ETH_P_IP)) {
+                                       skb->protocol = htons(ETH_P_IPV6);
+                                       goto retry;
+                               }
+                               return -EINVAL;
+                       }
+               }
        }
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
index 6ac3a5bd0117cda508f379c001351e20c63ef727..e0f709d26dde4eb7547d43d3ec2bf8f83bfb249d 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <net/inet_sock.h>
 #include <net/snmp.h>
+#include <net/ip.h>
 
 struct icmp_err {
   int          errno;
@@ -39,7 +40,13 @@ struct net_proto_family;
 struct sk_buff;
 struct net;
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+                const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+       __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
 int icmp_rcv(struct sk_buff *skb);
 int icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);
index 8866bfce61214b6af1c3c59b4885c8c39e8e0e90..be3cad9c2e4c37b282e5c2d0e2ef5f05a79b7438 100644 (file)
@@ -667,6 +667,8 @@ static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
 }
 
 void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+                        struct sk_buff *skb, __be32 *info);
 int ip_options_compile(struct net *net, struct ip_options *opt,
                       struct sk_buff *skb);
 int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -716,7 +718,7 @@ extern int sysctl_icmp_msgs_burst;
 int ip_misc_proc_init(void);
 #endif
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
                                struct netlink_ext_ack *extack);
 
 #endif /* _IP_H */
index b669fe6dbc3bad2a6d61b7a2b9d54ff83b7e51c8..98f31c7ea23df92e82f00c251bdeca684180b1b5 100644 (file)
@@ -63,10 +63,11 @@ struct pnpipehdr {
                u8              state_after_reset;      /* reset request */
                u8              error_code;             /* any response */
                u8              pep_type;               /* status indication */
-               u8              data[1];
+               u8              data0;                  /* anything else */
        };
+       u8                      data[];
 };
-#define other_pep_type         data[1]
+#define other_pep_type         data[0]
 
 static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
 {
index 7298a53b970296d0860956645d9c47b45d32300f..85386becbaea211504eaeae6a549e96d204afc75 100644 (file)
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
                xfrm_pol_put(pols[i]);
 }
 
-void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *, bool);
 
 static inline void __xfrm_state_put(struct xfrm_state *x)
 {
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x)
 static inline void xfrm_state_put(struct xfrm_state *x)
 {
        if (refcount_dec_and_test(&x->refcnt))
-               __xfrm_state_destroy(x);
+               __xfrm_state_destroy(x, false);
+}
+
+static inline void xfrm_state_put_sync(struct xfrm_state *x)
+{
+       if (refcount_dec_and_test(&x->refcnt))
+               __xfrm_state_destroy(x, true);
 }
 
 static inline void xfrm_state_hold(struct xfrm_state *x)
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo {
 
 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
 int xfrm_state_delete(struct xfrm_state *x);
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
index 2acbc8bc465b5f7dec98a0d687671f3317763259..4a53f6cfa0341590c3be9b84bb88d49b2d5d4994 100644 (file)
@@ -272,13 +272,14 @@ union drm_amdgpu_vm {
 
 /* sched ioctl */
 #define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE      1
+#define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE      2
 
 struct drm_amdgpu_sched_in {
        /* AMDGPU_SCHED_OP_* */
        __u32   op;
        __u32   fd;
        __s32   priority;
-       __u32   flags;
+       __u32   ctx_id;
 };
 
 union drm_amdgpu_sched {
index e582e8e7527afc9c7c37eb9bfaf6d070b807045b..b80b85f0d9d8c9cc5aa0854d9be50377cab3e516 100644 (file)
@@ -348,6 +348,7 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
                              unsigned int axi_id,  unsigned int width,
                              unsigned int height, unsigned int stride,
                              u32 format, uint64_t modifier, unsigned long *eba);
+bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan);
 
 /*
  * IPU CMOS Sensor Interface (csi) functions
index 7cea802d00efa3bd7c676af35b7ab676dd1a00a6..fca899622937f48be0b47b25479fe3bff2252fcf 100644 (file)
@@ -550,6 +550,7 @@ skip:
        initrd_end = 0;
 }
 
+#ifdef CONFIG_BLK_DEV_RAM
 #define BUF_SIZE 1024
 static void __init clean_rootfs(void)
 {
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void)
        ksys_close(fd);
        kfree(buf);
 }
+#endif
 
 static int __init populate_rootfs(void)
 {
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void)
                printk(KERN_INFO "Unpacking initramfs...\n");
                err = unpack_to_rootfs((char *)initrd_start,
                        initrd_end - initrd_start);
-               if (err) {
+               if (err)
                        printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
-                       clean_rootfs();
-               }
                free_initrd();
 #endif
        }
index abf1002080dfb1bc72c25e167eee425fc64da2af..93a5cbbde421c346e72b10cd56e04a13888d9e7b 100644 (file)
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
        }
 
        if (!node || node->prefixlen != key->prefixlen ||
+           node->prefixlen != matchlen ||
            (node->flags & LPM_TREE_NODE_FLAG_IM)) {
                ret = -ENOENT;
                goto out;
index d43b145358275d314d02939f674a2ecc719852b2..950ab2f28922e3cbc341700f6d67d645d5185d73 100644 (file)
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
        struct stack_map_irq_work *work;
 
        work = container_of(entry, struct stack_map_irq_work, irq_work);
-       up_read(work->sem);
+       up_read_non_owner(work->sem);
        work->sem = NULL;
 }
 
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        } else {
                work->sem = &current->mm->mmap_sem;
                irq_work_queue(&work->irq_work);
+               /*
+                * The irq_work will release the mmap_sem with
+                * up_read_non_owner(). The rwsem_release() is called
+                * here to release the lock from lockdep's perspective.
+                */
+               rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
        }
 }
 
index 8577bb7f8be6739f143667af19f916b04de01126..84470d1480aa4c4cfc4a5df70e4849d426bf922b 100644 (file)
@@ -559,12 +559,12 @@ static int map_create(union bpf_attr *attr)
        err = bpf_map_new_fd(map, f_flags);
        if (err < 0) {
                /* failed to allocate fd.
-                * bpf_map_put() is needed because the above
+                * bpf_map_put_with_uref() is needed because the above
                 * bpf_map_alloc_id() has published the map
                 * to the userspace and the userspace may
                 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
                 */
-               bpf_map_put(map);
+               bpf_map_put_with_uref(map);
                return err;
        }
 
@@ -1986,7 +1986,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 
        fd = bpf_map_new_fd(map, f_flags);
        if (fd < 0)
-               bpf_map_put(map);
+               bpf_map_put_with_uref(map);
 
        return fd;
 }
index 56674a7c377884b17d8296b6cf6f6e31a8158916..5fcce2f4209dd403d0571c41b5a458516f5e773e 100644 (file)
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
        return 0;
 }
 
-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
-                            int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+                            u32 regno, int off, int size,
+                            enum bpf_access_type t)
 {
        struct bpf_reg_state *regs = cur_regs(env);
        struct bpf_reg_state *reg = &regs[regno];
-       struct bpf_insn_access_aux info;
+       struct bpf_insn_access_aux info = {};
 
        if (reg->smin_value < 0) {
                verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
                return -EACCES;
        }
 
+       env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+
        return 0;
 }
 
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                        verbose(env, "cannot write into socket\n");
                        return -EACCES;
                }
-               err = check_sock_access(env, regno, off, size, t);
+               err = check_sock_access(env, insn_idx, regno, off, size, t);
                if (!err && value_regno >= 0)
                        mark_reg_unknown(env, regs, value_regno);
        } else {
@@ -6917,7 +6920,8 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        u32 off_reg;
 
                        aux = &env->insn_aux_data[i + delta];
-                       if (!aux->alu_state)
+                       if (!aux->alu_state ||
+                           aux->alu_state == BPF_ALU_NON_POINTER)
                                continue;
 
                        isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
index c3484785b17958df85b11ca2823a1f4a4d41f59b..0e97ca9306efc164ada86b6c6bd73506bfdb706e 100644 (file)
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
        expires = group->next_update;
        if (now < expires)
                goto out;
-       if (now - expires > psi_period)
+       if (now - expires >= psi_period)
                missed_periods = div_u64(now - expires, psi_period);
 
        /*
index c521b7347482509e3d862f7bffbcf095b8177fd8..c4238b441624415cfd6113bb36ea5d71b6eaaa54 100644 (file)
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
        const char tgid_space[] = "          ";
        const char space[] = "  ";
 
+       print_event_info(buf, m);
+
        seq_printf(m, "#                          %s  _-----=> irqs-off\n",
                   tgid ? tgid_space : space);
        seq_printf(m, "#                          %s / _----=> need-resched\n",
index d5fb09ebba8b79da9e82bc6d23fc48e8d1a02274..9eaf07f99212f797df29fb5f0b93f475c48f441f 100644 (file)
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
 static nokprobe_inline int
 fetch_store_strlen(unsigned long addr)
 {
-       mm_segment_t old_fs;
        int ret, len = 0;
        u8 c;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       pagefault_disable();
-
        do {
-               ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+               ret = probe_mem_read(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 
-       pagefault_enable();
-       set_fs(old_fs);
-
        return (ret < 0) ? ret : len;
 }
 
index d8c474b6691e92c6a2d26519c5f59cb4b4d04afc..9737059ec58bb213fff5f80b0d34d39395c49904 100644 (file)
@@ -113,6 +113,28 @@ config KASAN_INLINE
 
 endchoice
 
+config KASAN_STACK_ENABLE
+       bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+       default !(CLANG_VERSION < 90000)
+       depends on KASAN
+       help
+         The LLVM stack address sanitizer has a known problem that
+         causes excessive stack usage in a lot of functions, see
+         https://bugs.llvm.org/show_bug.cgi?id=38809
+         Disabling asan-stack makes it safe to run kernels built
+         with clang-8 with KASAN enabled, though it loses some of
+         the functionality.
+         This feature is always disabled when compile-testing with clang-8
+         or earlier to avoid cluttering the output in stack overflow
+         warnings, but clang-8 users can still enable it for builds without
+         CONFIG_COMPILE_TEST.  On gcc and later clang versions it is
+         assumed to always be safe to use and enabled by default.
+
+config KASAN_STACK
+       int
+       default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+       default 0
+
 config KASAN_S390_4_LEVEL_PAGING
        bool "KASan: use 4-level paging"
        depends on KASAN && S390
index c6659cb370331fa8afed30ee366b9b2432b9c6f0..59875eb278ea55f2683955dae36f7ebcbdc43bcd 100644 (file)
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
                new_s0->index_key[i] =
                        ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
 
-       blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
-       pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
-       new_s0->index_key[keylen - 1] &= ~blank;
+       if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+               blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+               pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+               new_s0->index_key[keylen - 1] &= ~blank;
+       }
 
        /* This now reduces to a node splitting exercise for which we'll need
         * to regenerate the disparity table.
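
The new guard matters when level lands exactly on a chunk boundary: the shift count is then 0, blank becomes ULONG_MAX, and the old unconditional &= ~blank zeroed the whole final index_key word instead of leaving it alone. A quick user-space demonstration:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            unsigned long key = 0xdeadbeefUL;
            int shift = 0;          /* level & ASSOC_ARRAY_KEY_CHUNK_MASK == 0 */

            /* Old code: blank == ULONG_MAX, so &= ~blank wipes the word. */
            unsigned long blank = ULONG_MAX << shift;
            printf("%lx\n", key & ~blank);          /* 0: the old bug */

            /* Patched code only blanks when there are bits to blank. */
            if (shift)
                    key &= ~(ULONG_MAX << shift);
            printf("%lx\n", key);                   /* deadbeef */
            return 0;
    }
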
index 0abb987dad9b3d697f252469d2111dc61f530913..1611cf00a13750e17694d7d71cdd27acd725d50f 100644 (file)
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
-       struct address_space *mapping = page_mapping(page);
+       struct address_space *mapping;
        bool page_poisoned = PagePoisoned(page);
        int mapcount;
 
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason)
                goto hex_only;
        }
 
+       mapping = page_mapping(page);
+
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
index afef61656c1e1ac7bdaf6eb5babf9d040ed51805..8dfdffc34a99bcb099742fd9614196fd3e1f685f 100644 (file)
@@ -3624,7 +3624,6 @@ retry_avoidcopy:
        copy_user_huge_page(new_page, old_page, address, vma,
                            pages_per_huge_page(h));
        __SetPageUptodate(new_page);
-       set_page_huge_active(new_page);
 
        mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
        mmu_notifier_invalidate_range_start(&range);
@@ -3645,6 +3644,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
+               set_page_huge_active(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -3729,6 +3729,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
        pte_t new_pte;
        spinlock_t *ptl;
        unsigned long haddr = address & huge_page_mask(h);
+       bool new_page = false;
 
        /*
         * Currently, we are forced to kill the process in the event the
@@ -3790,7 +3791,7 @@ retry:
                }
                clear_huge_page(page, address, pages_per_huge_page(h));
                __SetPageUptodate(page);
-               set_page_huge_active(page);
+               new_page = true;
 
                if (vma->vm_flags & VM_MAYSHARE) {
                        int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3861,6 +3862,15 @@ retry:
        }
 
        spin_unlock(ptl);
+
+       /*
+        * Only make newly allocated pages active.  Existing pages found
+        * in the pagecache could be !page_huge_active() if they have been
+        * isolated for migration.
+        */
+       if (new_page)
+               set_page_huge_active(page);
+
        unlock_page(page);
 out:
        return ret;
@@ -4095,7 +4105,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);
-       set_page_huge_active(page);
 
        mapping = dst_vma->vm_file->f_mapping;
        idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4163,6 +4172,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
+       set_page_huge_active(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
index e2bb06c1b45e9e1583b0407c29dd75370f5251e1..5d1065efbd4769151a5ea5f3540f94d2dad7b63c 100644 (file)
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n
 
 CFLAGS_REMOVE_common.o = -pg
 CFLAGS_REMOVE_generic.o = -pg
+CFLAGS_REMOVE_tags.o = -pg
+
 # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 
index 73c9cbfdedf4685de5334f7bd43db708e4c3fc68..09b534fbba17f647ecf0652cbbd1e9f82fce57f6 100644 (file)
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
  *    get different tags.
  */
 static u8 assign_tag(struct kmem_cache *cache, const void *object,
-                       bool init, bool krealloc)
+                       bool init, bool keep_tag)
 {
-       /* Reuse the same tag for krealloc'ed objects. */
-       if (krealloc)
+       /*
+        * 1. When an object is kmalloc()'ed, two hooks are called:
+        *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
+        *    tag only in the first one.
+        * 2. We reuse the same tag for krealloc'ed objects.
+        */
+       if (keep_tag)
                return get_tag(object);
 
        /*
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
        return (void *)object;
 }
 
-void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
-                                       gfp_t flags)
-{
-       return kasan_kmalloc(cache, object, cache->object_size, flags);
-}
-
 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 }
 
 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
-                               size_t size, gfp_t flags, bool krealloc)
+                               size_t size, gfp_t flags, bool keep_tag)
 {
        unsigned long redzone_start;
        unsigned long redzone_end;
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                KASAN_SHADOW_SCALE_SIZE);
 
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-               tag = assign_tag(cache, object, false, krealloc);
+               tag = assign_tag(cache, object, false, keep_tag);
 
        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
        return set_tag(object, tag);
 }
 
+void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
+                                       gfp_t flags)
+{
+       return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
+}
+
 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
 {
-       return __kasan_kmalloc(cache, object, size, flags, false);
+       return __kasan_kmalloc(cache, object, size, flags, true);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
index 0777649e07c44f34125d6636beb4844fcac00bdb..63fca317265997166716842611daba70cdf7191c 100644 (file)
@@ -46,7 +46,7 @@ void kasan_init_tags(void)
        int cpu;
 
        for_each_possible_cpu(cpu)
-               per_cpu(prng_state, cpu) = get_random_u32();
+               per_cpu(prng_state, cpu) = (u32)get_cycles();
 }
 
 /*
index f9d9dc250428142569672a5a5e7c56d8ff949e90..707fa5579f66f1e1e96a5613e50ff74b92417954 100644 (file)
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;
+       unsigned long untagged_ptr;
 
        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
        write_lock_irqsave(&kmemleak_lock, flags);
 
-       min_addr = min(min_addr, ptr);
-       max_addr = max(max_addr, ptr + size);
+       untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+       min_addr = min(min_addr, untagged_ptr);
+       max_addr = max(max_addr, untagged_ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end,
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;
+       unsigned long untagged_ptr;
 
        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end,
                pointer = *ptr;
                kasan_enable_current();
 
-               if (pointer < min_addr || pointer >= max_addr)
+               untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
+               if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
                        continue;
 
                /*
index f3416632e5a4137c960434c9a59f92a79e0e0204..ec00be51a24fd6a9639897fafeea3abd45b9d3f4 100644 (file)
@@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
 
        set_fs(KERNEL_DS);
        pagefault_disable();
-       current->kernel_uaccess_faults_ok++;
        ret = __copy_from_user_inatomic(dst,
                        (__force const void __user *)src, size);
-       current->kernel_uaccess_faults_ok--;
        pagefault_enable();
        set_fs(old_fs);
 
@@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
 
        set_fs(KERNEL_DS);
        pagefault_disable();
-       current->kernel_uaccess_faults_ok++;
        ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
-       current->kernel_uaccess_faults_ok--;
        pagefault_enable();
        set_fs(old_fs);
 
@@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
 
        set_fs(KERNEL_DS);
        pagefault_disable();
-       current->kernel_uaccess_faults_ok++;
 
        do {
                ret = __get_user(*dst++, (const char __user __force *)src++);
        } while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
-       current->kernel_uaccess_faults_ok--;
        dst[-1] = '\0';
        pagefault_enable();
        set_fs(old_fs);
index 124e794867c564c693e80a2b5827aebe3d3643d3..1ad28323fb9faceabb98cfc9f1b3834b365919bb 100644 (file)
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page)
        return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+       struct page *page = pfn_to_page(pfn);
+
        /* Ensure the starting page is pageblock-aligned */
-       BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+       BUG_ON(pfn & (pageblock_nr_pages - 1));
 
        /* If the entire pageblock is free, move to the end of free page */
        if (pageblock_free(page)) {
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page)
                /* be careful. we don't have locks, page_order can be changed.*/
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
-                       return page + (1 << order);
+                       return pfn + (1 << order);
        }
 
-       return page + pageblock_nr_pages;
+       return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+       struct page *page = pfn_to_page(pfn);
        struct zone *zone;
-       unsigned long pfn;
 
        /*
         * We have to be careful here because we are iterating over memory
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-       struct page *page = pfn_to_page(start_pfn);
-       unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
-       struct page *end_page = pfn_to_page(end_pfn);
+       unsigned long end_pfn, pfn;
+
+       end_pfn = min(start_pfn + nr_pages,
+                       zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
        /* Check the starting page of each pageblock within the range */
-       for (; page < end_page; page = next_active_pageblock(page)) {
-               if (!is_pageblock_removable_nolock(page))
+       for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+               if (!is_pageblock_removable_nolock(pfn))
                        return false;
                cond_resched();
        }
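
The pfn-based rewrite above matters because, with a sparse memmap, struct page pointer arithmetic is only meaningful within a single memory section, while a pfn survives the holes between sections. A minimal userspace sketch of the hazard follows; the two-section layout and all names in it (toy_page, SECTION_PAGES, pfn_to_toy_page) are invented for illustration:

    /* Toy model: two discontiguous "sections" of page structs. */
    #include <assert.h>
    #include <stdio.h>

    struct toy_page { unsigned long pfn; };

    #define SECTION_PAGES 4
    static struct toy_page section0[SECTION_PAGES]; /* pfns 0..3          */
    static struct toy_page section1[SECTION_PAGES]; /* pfns 8..11, a hole */

    static struct toy_page *pfn_to_toy_page(unsigned long pfn)
    {
        if (pfn < SECTION_PAGES)
            return &section0[pfn];
        if (pfn >= 8 && pfn < 8 + SECTION_PAGES)
            return &section1[pfn - 8];
        return NULL; /* pfn falls into the hole */
    }

    int main(void)
    {
        unsigned long pfn;

        for (pfn = 0; pfn < SECTION_PAGES; pfn++)
            section0[pfn].pfn = pfn;
        for (pfn = 0; pfn < SECTION_PAGES; pfn++)
            section1[pfn].pfn = 8 + pfn;

        /* pfn arithmetic stays valid across the hole... */
        assert(pfn_to_toy_page(3 + 5)->pfn == 8);

        /* ...while pointer arithmetic would walk off the end of section0 */
        printf("pointer walk from pfn 3 + 5 objects lands at %#lx, pfn 8 lives at %#lx\n",
               (unsigned long)&section0[3] + 5 * sizeof(struct toy_page),
               (unsigned long)&section1[0]);
        return 0;
    }
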
index d4496d9d34f533dcd66accb7d92a69a03feae65c..ee2bce59d2bfffb557aecac51245a7fdee950936 100644 (file)
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                              nodemask_t *nodes)
 {
        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-       const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+       unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
        if (copy > nbytes) {
                if (copy > PAGE_SIZE)
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy,
        int uninitialized_var(pval);
        nodemask_t nodes;
 
-       if (nmask != NULL && maxnode < MAX_NUMNODES)
+       if (nmask != NULL && maxnode < nr_node_ids)
                return -EINVAL;
 
        err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-       nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+       nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
        if (nmask)
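
The effect of switching from MAX_NUMNODES to nr_node_ids is purely a sizing one: the kernel stops demanding a user buffer sized for the compile-time maximum and accepts one sized for the node ids this machine can actually have. A small sketch of the arithmetic; both values below are invented for illustration:

    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
        unsigned long max_numnodes = 1024; /* e.g. a CONFIG_NODES_SHIFT=10 build */
        unsigned long nr_node_ids  = 4;    /* what this machine actually has */

        printf("buffer required before: %lu bytes\n",
               (unsigned long)(BITS_TO_LONGS(max_numnodes) * sizeof(long))); /* 128 */
        printf("buffer required after:  %lu bytes\n",
               (unsigned long)(BITS_TO_LONGS(nr_node_ids) * sizeof(long)));  /* 8 */
        return 0;
    }
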
index d4fd680be3b0facd6ca1730612b74372a77a196a..181f5d2718a9b1c31be22dd76758d4693e002e03 100644 (file)
@@ -1315,6 +1315,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                lock_page(hpage);
        }
 
+       /*
+        * Check for pages which are in the process of being freed.  Without
+        * page_mapping() set, the hugetlbfs-specific move-page routine will
+        * not be called and we could leak usage counts for subpools.
+        */
+       if (page_private(hpage) && !page_mapping(hpage)) {
+               rc = -EBUSY;
+               goto out_unlock;
+       }
+
        if (PageAnon(hpage))
                anon_vma = page_get_anon_vma(hpage);
 
@@ -1345,6 +1355,7 @@ put_anon:
                put_new_page = NULL;
        }
 
+out_unlock:
        unlock_page(hpage);
 out:
        if (rc != -EAGAIN)
index f901065c4c64cf63b455497fb993e7c4eacfc184..fc1809b1bed67bcf4e473bc609dcad5072f38b88 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2426,12 +2426,11 @@ int expand_downwards(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *prev;
-       int error;
+       int error = 0;
 
        address &= PAGE_MASK;
-       error = security_mmap_addr(address);
-       if (error)
-               return error;
+       if (address < mmap_min_addr)
+               return -EPERM;
 
        /* Enforce stack_guard_gap */
        prev = vma->vm_prev;
index 46285d28e43b529b333dbd8c3b3cf0123df66417..0b9f577b1a2aee9f2b959f2d717e936802cbfc75 100644 (file)
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone)
 
        max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
                        watermark_boost_factor, 10000);
+
+       /*
+        * The high watermark may be uninitialised if fragmentation occurs
+        * very early in boot, so do not boost. We do not fall through and
+        * boost by pageblock_nr_pages, because failing allocations that
+        * early means reclaim is not going to help, and it may even be
+        * impossible to reclaim the boosted watermark, resulting in a hang.
+        */
+       if (!max_boost)
+               return;
+
        max_boost = max(pageblock_nr_pages, max_boost);
 
        zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
@@ -4675,11 +4687,11 @@ refill:
                /* Even if we own the page, we do not use atomic_set().
                 * This would break get_page_unless_zero() users.
                 */
-               page_ref_add(page, size);
+               page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
 
                /* reset page count bias and offset to start of new frag */
                nc->pfmemalloc = page_is_pfmemalloc(page);
-               nc->pagecnt_bias = size + 1;
+               nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                nc->offset = size;
        }
 
@@ -4695,10 +4707,10 @@ refill:
                size = nc->size;
 #endif
                /* OK, page count is 0, we can safely set it */
-               set_page_count(page, size + 1);
+               set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
                /* reset page count bias and offset to start of new frag */
-               nc->pagecnt_bias = size + 1;
+               nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                offset = size - fragsz;
        }
 
index 6ece1e2fe76eb6b6670fc95fc9cf50d294e821cc..2c012eee133d1f13dc3fe8b7112278dfc02e8ceb 100644 (file)
@@ -2848,16 +2848,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
        struct inode *inode = d_inode(old_dentry);
-       int ret;
+       int ret = 0;
 
        /*
         * No ordinary (disk based) filesystem counts links as inodes;
         * but each new link needs a new dentry, pinning lowmem, and
         * tmpfs dentries cannot be pruned until they are unlinked.
+        * But if an O_TMPFILE file is linked into the tmpfs, the
+        * first link must skip that, to get the accounting right.
         */
-       ret = shmem_reserve_inode(inode->i_sb);
-       if (ret)
-               goto out;
+       if (inode->i_nlink) {
+               ret = shmem_reserve_inode(inode->i_sb);
+               if (ret)
+                       goto out;
+       }
 
        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
index 78eb8c5bf4e4ca126dbba917904216ab27fdf72f..91c1863df93dbb155f8554c79799aa8d32ce4ee2 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
        void *freelist;
        void *addr = page_address(page);
 
-       page->s_mem = kasan_reset_tag(addr) + colour_off;
+       page->s_mem = addr + colour_off;
        page->active = 0;
 
        if (OBJFREELIST_SLAB(cachep))
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
                /* Slab management obj is off-slab. */
                freelist = kmem_cache_alloc_node(cachep->freelist_cache,
                                              local_flags, nodeid);
+               freelist = kasan_reset_tag(freelist);
                if (!freelist)
                        return NULL;
        } else {
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
        offset *= cachep->colour_off;
 
+       /*
+        * Call kasan_poison_slab() before calling alloc_slabmgmt(), so
+        * page_address() in the latter returns a non-tagged pointer,
+        * as it should be for slab pages.
+        */
+       kasan_poison_slab(page);
+
        /* Get slab management. */
        freelist = alloc_slabmgmt(cachep, page, offset,
                        local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 
        slab_map_pages(cachep, page, freelist);
 
-       kasan_poison_slab(page);
        cache_init_objs(cachep, page);
 
        if (gfpflags_allow_blocking(local_flags))
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
        void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-       ret = kasan_slab_alloc(cachep, ret, flags);
        trace_kmem_cache_alloc(_RET_IP_, ret,
                               cachep->object_size, cachep->size, flags);
 
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-       ret = kasan_slab_alloc(cachep, ret, flags);
        trace_kmem_cache_alloc_node(_RET_IP_, ret,
                                    cachep->object_size, cachep->size,
                                    flags, nodeid);
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
        unsigned int objnr;
        unsigned long offset;
 
+       ptr = kasan_reset_tag(ptr);
+
        /* Find and validate object. */
        cachep = page->slab_cache;
        objnr = obj_to_index(cachep, page, (void *)ptr);
index 4190c24ef0e9dfcce2463f950f750e6c251d7bac..3841053187793cce2963ee1cd5f3e680271d21de 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
 
        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
-               void *object = p[i];
-
-               kmemleak_alloc_recursive(object, s->object_size, 1,
+               p[i] = kasan_slab_alloc(s, p[i], flags);
+               /* As p[i] might get tagged, call kmemleak hook after KASAN. */
+               kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
-               p[i] = kasan_slab_alloc(s, object, flags);
        }
 
        if (memcg_kmem_enabled())
index 81732d05e74a8f54f8671a80fe942ddc103afea4..f9d89c1b5977c03e9e542c9dd48ab38dfa25962e 100644 (file)
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        ret = page ? page_address(page) : NULL;
-       kmemleak_alloc(ret, size, 1, flags);
        ret = kasan_kmalloc_large(ret, size, flags);
+       /* As ret might get tagged, call kmemleak hook after KASAN. */
+       kmemleak_alloc(ret, size, 1, flags);
        return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
index 1e3d0ec4e2007b6c68fdc92d1cff5e5eebd3d266..dc777761b6b70d32ca8c0479b85a0ecde0b00b88 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
                                 unsigned long ptr_addr)
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
-       return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
+       /*
+        * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+        * Normally, this doesn't cause any issues, as both set_freepointer()
+        * and get_freepointer() are called with a pointer with the same tag.
+        * However, there are some issues with CONFIG_SLUB_DEBUG code. For
+        * example, when __free_slub() iterates over objects in a cache, it
+        * passes untagged pointers to check_object(). check_object() in turns
+        * calls get_freepointer() with an untagged pointer, which causes the
+        * freepointer to be restored incorrectly.
+        */
+       return (void *)((unsigned long)ptr ^ s->random ^
+                       (unsigned long)kasan_reset_tag((void *)ptr_addr));
 #else
        return ptr;
 #endif
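
The round-trip property the new comment relies on: a freepointer XOR-ed into its slot with one address must be XOR-ed back out with the bit-identical address, so the slot address is canonicalized by stripping the tag byte first. A minimal 64-bit userspace sketch; TAG_MASK and the example addresses are assumptions standing in for KASAN_SW_TAGS top-byte tags, not kernel definitions:

    #include <assert.h>
    #include <stdint.h>

    #define TAG_MASK 0xff00000000000000UL /* top-byte tag, as arm64 TBI uses */

    static void *reset_tag(void *p)
    {
        return (void *)((uintptr_t)p & ~TAG_MASK);
    }

    static void *obfuscate(void *ptr, uintptr_t secret, void *slot)
    {
        /* strip the tag so store and load agree even when one caller
         * passes a tagged slot address and the other an untagged one */
        return (void *)((uintptr_t)ptr ^ secret ^ (uintptr_t)reset_tag(slot));
    }

    int main(void)
    {
        uintptr_t secret = 0xdeadbeefcafef00dUL;
        void *next          = (void *)0x1000;
        void *slot_tagged   = (void *)0xab00000000002000UL;
        void *slot_untagged = (void *)0x0000000000002000UL;

        void *stored = obfuscate(next, secret, slot_tagged);
        /* a debug path reading through the untagged alias still round-trips */
        assert(obfuscate(stored, secret, slot_untagged) == next);
        return 0;
    }
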
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
                __p < (__addr) + (__objects) * (__s)->size; \
                __p += (__s)->size)
 
-#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
-       for (__p = fixup_red_left(__s, __addr), __idx = 1; \
-               __idx <= __objects; \
-               __p += (__s)->size, __idx++)
-
 /* Determine object index from a given position */
 static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
-       return (p - addr) / s->size;
+       return (kasan_reset_tag(p) - addr) / s->size;
 }
 
 static inline unsigned int order_objects(unsigned int order, unsigned int size)
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
                return 1;
 
        base = page_address(page);
+       object = kasan_reset_tag(object);
        object = restore_red_left(s, object);
        if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
        init_tracking(s, object);
 }
 
+static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+{
+       if (!(s->flags & SLAB_POISON))
+               return;
+
+       metadata_access_enable();
+       memset(addr, POISON_INUSE, PAGE_SIZE << order);
+       metadata_access_disable();
+}
+
 static inline int alloc_consistency_checks(struct kmem_cache *s,
                                        struct page *page,
                                        void *object, unsigned long addr)
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}
+static inline void setup_page_debug(struct kmem_cache *s,
+                       void *addr, int order) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
  */
 static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
+       ptr = kasan_kmalloc_large(ptr, size, flags);
+       /* As ptr might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ptr, size, 1, flags);
-       return kasan_kmalloc_large(ptr, size, flags);
+       return ptr;
 }
 
 static __always_inline void kfree_hook(void *x)
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
+       kasan_poison_slab(page);
+
        start = page_address(page);
 
-       if (unlikely(s->flags & SLAB_POISON))
-               memset(start, POISON_INUSE, PAGE_SIZE << order);
-
-       kasan_poison_slab(page);
+       setup_page_debug(s, start, order);
 
        shuffle = shuffle_freelist(s, page);
 
        if (!shuffle) {
-               for_each_object_idx(p, idx, s, start, page->objects) {
-                       if (likely(idx < page->objects)) {
-                               next = p + s->size;
-                               next = setup_object(s, page, next);
-                               set_freepointer(s, p, next);
-                       } else
-                               set_freepointer(s, p, NULL);
-               }
                start = fixup_red_left(s, start);
                start = setup_object(s, page, start);
                page->freelist = start;
+               for (idx = 0, p = start; idx < page->objects - 1; idx++) {
+                       next = p + s->size;
+                       next = setup_object(s, page, next);
+                       set_freepointer(s, p, next);
+                       p = next;
+               }
+               set_freepointer(s, p, NULL);
        }
 
        page->inuse = page->objects;
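
The rewritten initialization walks the objects once, pointing each freepointer at the next object and terminating the last with NULL, instead of the old index-based loop that special-cased the final slot. A self-contained sketch of the same linking over a plain buffer; the object size, count, and freepointer-at-offset-0 layout are simplifying assumptions:

    #include <assert.h>
    #include <string.h>

    enum { OBJ_SIZE = 32, NR_OBJS = 8 };
    static char slab[OBJ_SIZE * NR_OBJS];

    /* the freepointer lives at offset 0 of each free object here */
    static void set_freepointer(void *obj, void *next)
    {
        memcpy(obj, &next, sizeof(next));
    }

    static void *get_freepointer(void *obj)
    {
        void *next;
        memcpy(&next, obj, sizeof(next));
        return next;
    }

    int main(void)
    {
        char *p = slab, *next;
        int idx, n = 0;

        for (idx = 0; idx < NR_OBJS - 1; idx++) {
            next = p + OBJ_SIZE;
            set_freepointer(p, next);
            p = next;
        }
        set_freepointer(p, NULL);

        /* walk the list: exactly NR_OBJS objects, then NULL */
        for (p = slab; p; p = get_freepointer(p))
            n++;
        assert(n == NR_OBJS);
        return 0;
    }
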
index 4929bc1be60efaac6276288540cd61c244244753..4d7d37eb3c40ba09e12941ae3898985e0bfc5889 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
-static bool need_activate_page_drain(int cpu)
-{
-       return false;
-}
-
 void activate_page(struct page *page)
 {
        struct zone *zone = page_zone(page);
@@ -653,13 +648,15 @@ void lru_add_drain(void)
        put_cpu();
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
 }
 
-static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -702,6 +699,12 @@ void lru_add_drain_all(void)
 
        mutex_unlock(&lock);
 }
+#else
+void lru_add_drain_all(void)
+{
+       lru_add_drain();
+}
+#endif
 
 /**
  * release_pages - batched put_page()
index 1ea0551380435edf79df72cd11ea6f27bec1495e..379319b1bcfd8cd81c6c543b00ee4b40c1fe9302 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len)
 {
        void *p;
 
-       p = kmalloc_track_caller(len, GFP_USER);
+       p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);
 
index fa2644d276ef1134bc3b41ce02b70bfdd8a678fa..e31e1b20f7f4dea446ec596ae661f7beb080f4ee 100644 (file)
 #include <net/sock.h>
 #include <net/tcp.h>
 
-static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
-               struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-       u32 ret;
-
-       preempt_disable();
-       rcu_read_lock();
-       bpf_cgroup_storage_set(storage);
-       ret = BPF_PROG_RUN(prog, ctx);
-       rcu_read_unlock();
-       preempt_enable();
-
-       return ret;
-}
-
-static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
-                       u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
+                       u32 *retval, u32 *time)
 {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
+       int ret = 0;
        u32 i;
 
        for_each_cgroup_storage_type(stype) {
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 
        if (!repeat)
                repeat = 1;
+
+       rcu_read_lock();
+       preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
-               *ret = bpf_test_run_one(prog, ctx, storage);
+               bpf_cgroup_storage_set(storage);
+               *retval = BPF_PROG_RUN(prog, ctx);
+
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
                if (need_resched()) {
-                       if (signal_pending(current))
-                               break;
                        time_spent += ktime_get_ns() - time_start;
+                       preempt_enable();
+                       rcu_read_unlock();
+
                        cond_resched();
+
+                       rcu_read_lock();
+                       preempt_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
+       preempt_enable();
+       rcu_read_unlock();
+
        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);
 
-       return 0;
+       return ret;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
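
The restructured loop holds rcu_read_lock()/preempt_disable() across iterations and only drops both, with the clock stopped, around cond_resched(), so scheduling latency never pollutes the measured run time. The skeleton below is a userspace approximation of just that timing structure; the kernel lock/unlock calls are kept as comments since they have no userspace equivalent, and run_once() plus the every-4096-iterations yield are invented stand-ins:

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    static unsigned long long now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static unsigned int run_once(void) { return 42; } /* stands in for BPF_PROG_RUN */

    int main(void)
    {
        unsigned long long start, spent = 0;
        unsigned int i, repeat = 1000000, retval = 0;

        /* rcu_read_lock(); preempt_disable(); */
        start = now_ns();
        for (i = 0; i < repeat; i++) {
            retval = run_once();
            if (i % 4096 == 0) {      /* stands in for need_resched() */
                spent += now_ns() - start;
                /* preempt_enable(); rcu_read_unlock(); */
                sched_yield();        /* stands in for cond_resched() */
                /* rcu_read_lock(); preempt_disable(); */
                start = now_ns();     /* yield time is excluded */
            }
        }
        spent += now_ns() - start;
        /* preempt_enable(); rcu_read_unlock(); */

        printf("retval=%u, mean ns/run=%llu\n", retval, spent / repeat);
        return 0;
    }
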
index 3aeff0895669609b753607abb362fc7bbbb7f28a..ac92b2eb32b1acafbdb85cd18f69079d977fa7fb 100644 (file)
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br,
                return;
 
        br_multicast_update_query_timer(br, query, max_delay);
-
-       /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
-        * the arrival port for IGMP Queries where the source address
-        * is 0.0.0.0 should not be added to router port list.
-        */
-       if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
-           saddr->proto == htons(ETH_P_IPV6))
-               br_multicast_mark_router(br, port);
+       br_multicast_mark_router(br, port);
 }
 
 static void br_ip4_multicast_query(struct net_bridge *br,
index 3661cdd927f15fc78a8b1436822924be5d4c8a17..7e71b0df1fbc9185b192a43427c7cb281b778ca1 100644 (file)
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con)
        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
 
        if (con->auth) {
+               int len = le32_to_cpu(con->in_reply.authorizer_len);
+
                /*
                 * Any connection that defines ->get_authorizer()
                 * should also define ->add_authorizer_challenge() and
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con)
                 */
                if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
                        ret = con->ops->add_authorizer_challenge(
-                                   con, con->auth->authorizer_reply_buf,
-                                   le32_to_cpu(con->in_reply.authorizer_len));
+                                   con, con->auth->authorizer_reply_buf, len);
                        if (ret < 0)
                                return ret;
 
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con)
                        return 0;
                }
 
-               ret = con->ops->verify_authorizer_reply(con);
-               if (ret < 0) {
-                       con->error_msg = "bad authorize reply";
-                       return ret;
+               if (len) {
+                       ret = con->ops->verify_authorizer_reply(con);
+                       if (ret < 0) {
+                               con->error_msg = "bad authorize reply";
+                               return ret;
+                       }
                }
        }
 
index 959d1c51826d8b18765bce50b4378f177e912797..3d348198004ffe4596f09e96c509e7c153dd7a80 100644 (file)
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname,
                                   char __user *optval, unsigned int optlen)
 {
        int err;
-       struct socket *sock = sockfd_lookup(fd, &err);
+       struct socket *sock;
+
+       if (optlen > INT_MAX)
+               return -EINVAL;
 
+       sock = sockfd_lookup(fd, &err);
        if (sock) {
                err = security_socket_setsockopt(sock, level, optname);
                if (err) {
index 8e276e0192a11add47992a6cca955e8eb13e66d8..5d03889502eb6ebf04ad1315d73f00586912cbc7 100644 (file)
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
        netdev_features_t feature;
        int feature_bit;
 
-       for_each_netdev_feature(&upper_disables, feature_bit) {
+       for_each_netdev_feature(upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(upper->wanted_features & feature)
                    && (features & feature)) {
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
        netdev_features_t feature;
        int feature_bit;
 
-       for_each_netdev_feature(&upper_disables, feature_bit) {
+       for_each_netdev_feature(upper_disables, feature_bit) {
                feature = __NETIF_F_BIT(feature_bit);
                if (!(features & feature) && (lower->features & feature)) {
                        netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
index 7a54dc11ac2d3c7f4f81d8e9c34a3ae5a8cff4b4..f7d0004fc16096eb42ece3a6acf645540ee2326b 100644 (file)
@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
        u32 off = skb_mac_header_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_cow(skb, len_diff);
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
        u32 off = skb_mac_header_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_unclone(skb, GFP_ATOMIC);
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_cow(skb, len_diff);
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;
 
-       /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
-       if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+       if (!skb_is_gso_tcp(skb))
                return -ENOTSUPP;
 
        ret = skb_unclone(skb, GFP_ATOMIC);
index 26d8484849126ea0a418b6290dcdcba30bf44c2f..2415d9cb9b89fefb30a7932a70c3497aeb67c80e 100644 (file)
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
  */
 void *netdev_alloc_frag(unsigned int fragsz)
 {
+       fragsz = SKB_DATA_ALIGN(fragsz);
+
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
 void *napi_alloc_frag(unsigned int fragsz)
 {
+       fragsz = SKB_DATA_ALIGN(fragsz);
+
        return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(napi_alloc_frag);
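
Both wrappers now round the requested size up to a cacheline multiple before carving it out of the per-cpu frag cache, so adjacent frags do not share a cacheline. The round-up is the usual power-of-two ALIGN; a sketch assuming a 64-byte SMP_CACHE_BYTES:

    #include <stdio.h>

    #define SMP_CACHE_BYTES 64 /* assumption: a typical x86-64 cacheline */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int sizes[] = { 1, 64, 65, 200 };
        for (unsigned int i = 0; i < 4; i++)
            printf("%3u -> %3u\n", sizes[i],
                   ALIGN_UP(sizes[i], SMP_CACHE_BYTES));
        /* 1->64, 64->64, 65->128, 200->256 */
        return 0;
    }
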
index a1917025e155bab3c96f9995594667c1383562f3..410f19148106a92b9e9eafabfc4bd6efb3c9c22a 100644 (file)
@@ -612,8 +612,8 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
 {
        struct device_node *ports, *port;
        struct dsa_port *dp;
+       int err = 0;
        u32 reg;
-       int err;
 
        ports = of_get_child_by_name(dn, "ports");
        if (!ports) {
@@ -624,19 +624,23 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
        for_each_available_child_of_node(ports, port) {
                err = of_property_read_u32(port, "reg", &reg);
                if (err)
-                       return err;
+                       goto out_put_node;
 
-               if (reg >= ds->num_ports)
-                       return -EINVAL;
+               if (reg >= ds->num_ports) {
+                       err = -EINVAL;
+                       goto out_put_node;
+               }
 
                dp = &ds->ports[reg];
 
                err = dsa_port_parse_of(dp, port);
                if (err)
-                       return err;
+                       goto out_put_node;
        }
 
-       return 0;
+out_put_node:
+       of_node_put(ports);
+       return err;
 }
 
 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
index 2d7e01b23572877423aabd002e7e74315e320882..c2261697ee83e0b1b5e937c2ab87ab40cc1b3d2e 100644 (file)
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
 
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
 {
-       u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
        int err;
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
                        return err;
        }
 
-       dsa_port_set_state_now(dp, stp_state);
+       if (!dp->bridge_dev)
+               dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 
        return 0;
 }
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
 
-       dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+       if (!dp->bridge_dev)
+               dsa_port_set_state_now(dp, BR_STATE_DISABLED);
 
        if (ds->ops->port_disable)
                ds->ops->port_disable(ds, port, phy);
@@ -291,6 +292,7 @@ static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
                return ERR_PTR(-EPROBE_DEFER);
        }
 
+       of_node_put(phy_dn);
        return phydev;
 }
 
index 777fa3b7fb13d9f73a71eee2162ce472c1097b92..f0165c5f376b398950db7696e813c94b19034068 100644 (file)
@@ -667,7 +667,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
        case CIPSO_V4_MAP_PASS:
                return 0;
        case CIPSO_V4_MAP_TRANS:
-               if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+               if ((level < doi_def->map.std->lvl.cipso_size) &&
+                   (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
                        return 0;
                break;
        }
@@ -1735,13 +1736,26 @@ validate_return:
  */
 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
 {
+       unsigned char optbuf[sizeof(struct ip_options) + 40];
+       struct ip_options *opt = (struct ip_options *)optbuf;
+
        if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
                return;
 
+       /*
+        * We might be called above the IP layer,
+        * so we cannot use icmp_send and IPCB here.
+        */
+
+       memset(opt, 0, sizeof(struct ip_options));
+       opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+       if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+               return;
+
        if (gateway)
-               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+               __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
        else
-               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+               __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
 }
 
 /**
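
The sizeof(struct ip_options) + 40 stack buffer above is sized from the protocol: an IPv4 header is at most 60 bytes, 20 of which are fixed, so options can never exceed 40 bytes, and the option data follows the struct directly. A standalone sketch of that sizing pattern with an invented header struct:

    #include <stdio.h>
    #include <string.h>

    struct opts {
        unsigned char optlen;
        unsigned char data[]; /* flexible array member, filled in place */
    };

    enum { MAX_IPV4_OPTS = 40 }; /* 60-byte max IP header - 20-byte fixed part */

    int main(void)
    {
        unsigned char buf[sizeof(struct opts) + MAX_IPV4_OPTS];
        struct opts *opt = (struct opts *)buf;

        memset(buf, 0, sizeof(buf));
        opt->optlen = MAX_IPV4_OPTS; /* largest value that can ever appear */
        printf("buffer holds up to %u option bytes\n", opt->optlen);
        return 0;
    }
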
index 5459f41fc26fa75beeb2800bf55a19a3feda507d..10e809b296ec8644e108923c6faa1e4e2179bc20 100644 (file)
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
-                       if (sk)
+                       if (sk && sk_fullsock(sk))
                                refcount_add(tailen, &sk->sk_wmem_alloc);
 
                        goto out;
index fe4f6a62423834a79705e0c01cd1329d9da4f836..ed14ec2455847f2f104cb33fcd6a64cd37b87b40 100644 (file)
@@ -710,6 +710,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                case RTA_GATEWAY:
                        cfg->fc_gw = nla_get_be32(attr);
                        break;
+               case RTA_VIA:
+                       NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+                       err = -EINVAL;
+                       goto errout;
                case RTA_PRIORITY:
                        cfg->fc_priority = nla_get_u32(attr);
                        break;
index 065997f414e61e22a31e663f89bd642800cf72b4..3f24414150e27f2096ced754fab5c2766660a040 100644 (file)
@@ -570,7 +570,8 @@ relookup_failed:
  *                     MUST reply to only the first fragment.
  */
 
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+                const struct ip_options *opt)
 {
        struct iphdr *iph;
        int room;
@@ -691,7 +692,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                                          iph->tos;
        mark = IP4_REPLY_MARK(net, skb_in->mark);
 
-       if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+       if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
                goto out_unlock;
 
 
@@ -742,7 +743,7 @@ out_bh_enable:
        local_bh_enable();
 out:;
 }
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
 
 
 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
index 3978f807fa8b7c8514f7727174facdb9812c9c59..6ae89f2b541bf2221d2842646f42ea8b05928056 100644 (file)
@@ -1457,9 +1457,23 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct ip_tunnel_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;
 
-       if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
-           !t->collect_md)
-               o_flags |= TUNNEL_KEY;
+       if (t->erspan_ver == 1 || t->erspan_ver == 2) {
+               if (!t->collect_md)
+                       o_flags |= TUNNEL_KEY;
+
+               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+                       goto nla_put_failure;
+
+               if (t->erspan_ver == 1) {
+                       if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
+                               goto nla_put_failure;
+               } else {
+                       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+                               goto nla_put_failure;
+                       if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+                               goto nla_put_failure;
+               }
+       }
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -1495,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        goto nla_put_failure;
        }
 
-       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
-               goto nla_put_failure;
-
-       if (t->erspan_ver == 1) {
-               if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
-                       goto nla_put_failure;
-       } else if (t->erspan_ver == 2) {
-               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
-                       goto nla_put_failure;
-               if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
-                       goto nla_put_failure;
-       }
-
        return 0;
 
 nla_put_failure:
index 51d8efba6de21c13712d35c6ee37c6a13f6fd5e9..1f4737b77067210df8bb049ebdb7da96222ee9f4 100644 (file)
@@ -307,11 +307,10 @@ drop:
 }
 
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
-                             struct sk_buff *skb)
+                             struct sk_buff *skb, struct net_device *dev)
 {
        const struct iphdr *iph = ip_hdr(skb);
        int (*edemux)(struct sk_buff *skb);
-       struct net_device *dev = skb->dev;
        struct rtable *rt;
        int err;
 
@@ -400,6 +399,7 @@ drop_error:
 
 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       struct net_device *dev = skb->dev;
        int ret;
 
        /* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        if (!skb)
                return NET_RX_SUCCESS;
 
-       ret = ip_rcv_finish_core(net, sk, skb);
+       ret = ip_rcv_finish_core(net, sk, skb, dev);
        if (ret != NET_RX_DROP)
                ret = dst_input(skb);
        return ret;
@@ -545,6 +545,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 
        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
+               struct net_device *dev = skb->dev;
                struct dst_entry *dst;
 
                skb_list_del_init(skb);
@@ -554,7 +555,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
                skb = l3mdev_ip_rcv(skb);
                if (!skb)
                        continue;
-               if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+               if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
                        continue;
 
                dst = skb_dst(skb);
index ed194d46c00e3292cd741a07cd8954ccdc5a1fd6..32a35043c9f590314b7fa354d5e948b59e665214 100644 (file)
@@ -251,8 +251,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
  * If opt == NULL, then skb->data should point to IP header.
  */
 
-int ip_options_compile(struct net *net,
-                      struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+                        struct ip_options *opt, struct sk_buff *skb,
+                        __be32 *info)
 {
        __be32 spec_dst = htonl(INADDR_ANY);
        unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@ eol:
                return 0;
 
 error:
-       if (skb) {
-               icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
-       }
+       if (info)
+               *info = htonl((pp_ptr-iph)<<24);
        return -EINVAL;
 }
+
+int ip_options_compile(struct net *net,
+                      struct ip_options *opt, struct sk_buff *skb)
+{
+       int ret;
+       __be32 info;
+
+       ret = __ip_options_compile(net, opt, skb, &info);
+       if (ret != 0 && skb)
+               icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+       return ret;
+}
 EXPORT_SYMBOL(ip_options_compile);
 
 /*
index f86bb4f066095a16a0d951a4103c7cd5cee6f683..d8e3a1fb8e8267a12cd484263403d6c7b27e3010 100644 (file)
@@ -3,9 +3,10 @@
 #include <linux/types.h>
 #include <net/net_namespace.h>
 #include <net/netlink.h>
+#include <linux/in6.h>
 #include <net/ip.h>
 
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
                                struct netlink_ext_ack *extack)
 {
        *ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@ int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
        switch (*ip_proto) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
+               return 0;
        case IPPROTO_ICMP:
+               if (family != AF_INET)
+                       break;
+               return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+       case IPPROTO_ICMPV6:
+               if (family != AF_INET6)
+                       break;
                return 0;
-       default:
-               NL_SET_ERR_MSG(extack, "Unsupported ip proto");
-               return -EOPNOTSUPP;
+#endif
        }
+       NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+       return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
index 5163b64f8fb3e31a8ebb3a0f3d49aad0191d2197..7bb9128c836345d41e74cca4747547459f06ec96 100644 (file)
@@ -2803,7 +2803,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
        if (tb[RTA_IP_PROTO]) {
                err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-                                                 &ip_proto, extack);
+                                                 &ip_proto, AF_INET, extack);
                if (err)
                        return err;
        }
index 2079145a3b7c5f498af429c9a8289342e4421fca..cf3c5095c10e8e7e56621beae2f93c93de184489 100644 (file)
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk)
        sk_mem_reclaim(sk);
        tcp_clear_all_retrans_hints(tcp_sk(sk));
        tcp_sk(sk)->packets_out = 0;
+       inet_csk(sk)->icsk_backoff = 0;
 }
 
 int tcp_disconnect(struct sock *sk, int flags)
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->write_seq += tp->max_window + 2;
        if (tp->write_seq == 0)
                tp->write_seq = 1;
-       icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
index efc6fef692ffdca4dcdd3f4b87a837656dd66c8c..ec3cea9d68288244d8e03b655d06f91640c36ee7 100644 (file)
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                if (sock_owned_by_user(sk))
                        break;
 
+               skb = tcp_rtx_queue_head(sk);
+               if (WARN_ON_ONCE(!skb))
+                       break;
+
                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
-               skb = tcp_rtx_queue_head(sk);
 
                tcp_mstamp_refresh(tp);
                delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
index 730bc44dbad9363814705b28c2f91a2253d91207..ccc78f3a4b60d3012430488bdfbcfc5122ff8627 100644 (file)
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                        /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
                        skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
                        list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
+                       tcp_init_tso_segs(skb, mss_now);
                        goto repair; /* Skip network transmission */
                }
 
index 5c3cd5d84a6f6f09cd5744373fee168a10a6baf4..372fdc5381a98e0d8a673ef1649323f91764ad8e 100644 (file)
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
 
        for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
                int (*handler)(struct sk_buff *skb, u32 info);
+               const struct ip_tunnel_encap_ops *encap;
 
-               if (!iptun_encaps[i])
+               encap = rcu_dereference(iptun_encaps[i]);
+               if (!encap)
                        continue;
-               handler = rcu_dereference(iptun_encaps[i]->err_handler);
+               handler = encap->err_handler;
                if (handler && !handler(skb, info))
                        return 0;
        }
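
The fix reads the RCU-protected slot exactly once into a local before using any member; re-reading iptun_encaps[i] between the NULL check and the ->err_handler load could observe two different values under a concurrent update. A sketch of the read-once discipline; the volatile pointer below is only a stand-in for rcu_dereference(), not its real semantics:

    #include <stdio.h>

    struct ops { int (*err_handler)(int); };

    static struct ops *volatile slot; /* stands in for an RCU-managed pointer */

    static int deliver(int info)
    {
        struct ops *e = slot;        /* one read, like rcu_dereference() */
        if (!e || !e->err_handler)
            return -1;
        return e->err_handler(info); /* uses the same snapshot throughout */
    }

    static int handler(int info) { return info; }

    int main(void)
    {
        struct ops o = { handler };
        slot = &o;
        printf("%d\n", deliver(7));
        /* re-reading 'slot' between the check and the call would race
         * with a concurrent writer clearing it */
        return 0;
    }
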
index 5afe9f83374de5239ced868cd7a5821e1de391c9..239d4a65ad6ef26988010cfa514491d4bf18f2c7 100644 (file)
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        skb->len += tailen;
                        skb->data_len += tailen;
                        skb->truesize += tailen;
-                       if (sk)
+                       if (sk && sk_fullsock(sk))
                                refcount_add(tailen, &sk->sk_wmem_alloc);
 
                        goto out;
index b858bd5280bf54155f31032313f95aa30373f814..867474abe2698d947b347373a8bc179defec7378 100644 (file)
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 
 static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
                                  struct inet6_skb_parm *opt,
-                                 u8 type, u8 code, int offset, u32 info)
+                                 u8 type, u8 code, int offset, __be32 info)
 {
        const struct inet6_protocol *ipprot;
 
index 801a9a0c217e8746b7dd2850b979702031fa81a3..26f25b6e2833994d50bd4b96e40c97c64ff23e34 100644 (file)
@@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
        return 0;
 }
 
+static void ip6erspan_set_version(struct nlattr *data[],
+                                 struct __ip6_tnl_parm *parms)
+{
+       if (!data)
+               return;
+
+       parms->erspan_ver = 1;
+       if (data[IFLA_GRE_ERSPAN_VER])
+               parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+       if (parms->erspan_ver == 1) {
+               if (data[IFLA_GRE_ERSPAN_INDEX])
+                       parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+       } else if (parms->erspan_ver == 2) {
+               if (data[IFLA_GRE_ERSPAN_DIR])
+                       parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+               if (data[IFLA_GRE_ERSPAN_HWID])
+                       parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+       }
+}
+
 static void ip6gre_netlink_parms(struct nlattr *data[],
                                struct __ip6_tnl_parm *parms)
 {
@@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
-
-       parms->erspan_ver = 1;
-       if (data[IFLA_GRE_ERSPAN_VER])
-               parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
-       if (parms->erspan_ver == 1) {
-               if (data[IFLA_GRE_ERSPAN_INDEX])
-                       parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
-       } else if (parms->erspan_ver == 2) {
-               if (data[IFLA_GRE_ERSPAN_DIR])
-                       parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
-               if (data[IFLA_GRE_ERSPAN_HWID])
-                       parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
-       }
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -2100,9 +2107,23 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
        struct __ip6_tnl_parm *p = &t->parms;
        __be16 o_flags = p->o_flags;
 
-       if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
-           !p->collect_md)
-               o_flags |= TUNNEL_KEY;
+       if (p->erspan_ver == 1 || p->erspan_ver == 2) {
+               if (!p->collect_md)
+                       o_flags |= TUNNEL_KEY;
+
+               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+                       goto nla_put_failure;
+
+               if (p->erspan_ver == 1) {
+                       if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+                               goto nla_put_failure;
+               } else {
+                       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+                               goto nla_put_failure;
+                       if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+                               goto nla_put_failure;
+               }
+       }
 
        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
@@ -2117,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
            nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
            nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
-           nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
-           nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+           nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
                goto nla_put_failure;
 
        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -2136,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        goto nla_put_failure;
        }
 
-       if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
-               goto nla_put_failure;
-
-       if (p->erspan_ver == 1) {
-               if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
-                       goto nla_put_failure;
-       } else if (p->erspan_ver == 2) {
-               if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
-                       goto nla_put_failure;
-               if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
-                       goto nla_put_failure;
-       }
-
        return 0;
 
 nla_put_failure:
@@ -2203,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
        int err;
 
        ip6gre_netlink_parms(data, &nt->parms);
+       ip6erspan_set_version(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);
 
        if (nt->parms.collect_md) {
@@ -2248,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (IS_ERR(t))
                return PTR_ERR(t);
 
+       ip6erspan_set_version(data, &p);
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
index 964491cf36720fc3fa5601076002e9c90a4e3302..8dad1d690b78967a04f0cee0fdabdf42e2340ca8 100644 (file)
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock);
 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
                                 struct rt6_exception *rt6_ex)
 {
+       struct fib6_info *from;
        struct net *net;
 
        if (!bucket || !rt6_ex)
                return;
 
        net = dev_net(rt6_ex->rt6i->dst.dev);
+       net->ipv6.rt6_stats->fib_rt_cache--;
+
+       /* Purge the exception completely to allow releasing the held resources:
+        * some socket cache may keep the dst around for an unlimited time.
+        */
+       from = rcu_dereference_protected(rt6_ex->rt6i->from,
+                                        lockdep_is_held(&rt6_exception_lock));
+       rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+       fib6_info_release(from);
+       dst_dev_put(&rt6_ex->rt6i->dst);
+
        hlist_del_rcu(&rt6_ex->hlist);
        dst_release(&rt6_ex->rt6i->dst);
        kfree_rcu(rt6_ex, rcu);
        WARN_ON_ONCE(!bucket->depth);
        bucket->depth--;
-       net->ipv6.rt6_stats->fib_rt_cache--;
 }
 
 /* Remove oldest rt6_ex in bucket and free the memory
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt)
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
        struct rt6_exception_bucket *bucket;
-       struct fib6_info *from = rt->from;
        struct in6_addr *src_key = NULL;
        struct rt6_exception *rt6_ex;
-
-       if (!from ||
-           !(rt->rt6i_flags & RTF_CACHE))
-               return;
+       struct fib6_info *from;
 
        rcu_read_lock();
+       from = rcu_dereference(rt->from);
+       if (!from || !(rt->rt6i_flags & RTF_CACHE))
+               goto unlock;
+
        bucket = rcu_dereference(from->rt6i_exception_bucket);
 
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
        if (rt6_ex)
                rt6_ex->stamp = jiffies;
 
+unlock:
        rcu_read_unlock();
 }
 
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net,
        u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
        const struct in6_addr *gw_addr = &cfg->fc_gateway;
        u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+       struct fib6_info *from;
        struct rt6_info *grt;
        int err;
 
        err = 0;
        grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
        if (grt) {
+               rcu_read_lock();
+               from = rcu_dereference(grt->from);
                if (!grt->dst.error &&
                    /* ignore match if it is the default route */
-                   grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+                   from && !ipv6_addr_any(&from->fib6_dst.addr) &&
                    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
                        NL_SET_ERR_MSG(extack,
                                       "Nexthop has invalid gateway or device mismatch");
                        err = -EINVAL;
                }
+               rcu_read_unlock();
 
                ip6_rt_put(grt);
        }
@@ -4166,6 +4182,10 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
                cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
                cfg->fc_flags |= RTF_GATEWAY;
        }
+       if (tb[RTA_VIA]) {
+               NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+               goto errout;
+       }
 
        if (tb[RTA_DST]) {
                int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4649,7 +4669,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                table = rt->fib6_table->tb6_id;
        else
                table = RT6_TABLE_UNSPEC;
-       rtm->rtm_table = table;
+       rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
        if (nla_put_u32(skb, RTA_TABLE, table))
                goto nla_put_failure;
 
@@ -4873,7 +4893,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
        if (tb[RTA_IP_PROTO]) {
                err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
-                                                 &fl6.flowi6_proto, extack);
+                                                 &fl6.flowi6_proto, AF_INET6,
+                                                 extack);
                if (err)
                        goto errout;
        }
index e8a1dabef803e5583b0fb3bd70d75a48dad46e9c..09e440e8dfaecd290608918e7d2046793effc92d 100644 (file)
@@ -1873,6 +1873,7 @@ static int __net_init sit_init_net(struct net *net)
 
 err_reg_dev:
        ipip6_dev_free(sitn->fb_tunnel_dev);
+       free_netdev(sitn->fb_tunnel_dev);
 err_alloc_dev:
        return err;
 }
index 2596ffdeebeaaa60f096d774f3da32bd5486a2f4..b444483cdb2b42ef7acdbd7d23a0c046f55077c2 100644 (file)
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        int peeked, peeking, off;
        int err;
        int is_udplite = IS_UDPLITE(sk);
+       struct udp_mib __percpu *mib;
        bool checksum_valid = false;
-       struct udp_mib *mib;
        int is_udp4;
 
        if (flags & MSG_ERRQUEUE)
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable);
  */
 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
                                      struct inet6_skb_parm *opt,
-                                     u8 type, u8 code, int offset, u32 info)
+                                     u8 type, u8 code, int offset, __be32 info)
 {
        int i;
 
        for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
                int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                              u8 type, u8 code, int offset, u32 info);
+                              u8 type, u8 code, int offset, __be32 info);
+               const struct ip6_tnl_encap_ops *encap;
 
-               if (!ip6tun_encaps[i])
+               encap = rcu_dereference(ip6tun_encaps[i]);
+               if (!encap)
                        continue;
-               handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+               handler = encap->err_handler;
                if (handler && !handler(skb, opt, type, code, offset, info))
                        return 0;
        }
index f5b4febeaa25b57604ba3f701e78cfb0c4a23f5c..bc65db782bfb1fa49d5e5f9d2a25c77372905feb 100644 (file)
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        unsigned int i;
 
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
        xfrm_flush_gc();
+       xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
 
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
index 655c787f9d54919c66666a4425a26969b9ddf91c..5651c29cb5bd0068d025c9500f6f7513556f65e7 100644 (file)
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
        return 0;
 }
 
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
-                              gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+                              struct sock *sk)
 {
        int err = -ENOBUFS;
 
-       sock_hold(sk);
-       if (*skb2 == NULL) {
-               if (refcount_read(&skb->users) != 1) {
-                       *skb2 = skb_clone(skb, allocation);
-               } else {
-                       *skb2 = skb;
-                       refcount_inc(&skb->users);
-               }
-       }
-       if (*skb2 != NULL) {
-               if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
-                       skb_set_owner_r(*skb2, sk);
-                       skb_queue_tail(&sk->sk_receive_queue, *skb2);
-                       sk->sk_data_ready(sk);
-                       *skb2 = NULL;
-                       err = 0;
-               }
+       if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+               return err;
+
+       skb = skb_clone(skb, allocation);
+
+       if (skb) {
+               skb_set_owner_r(skb, sk);
+               skb_queue_tail(&sk->sk_receive_queue, skb);
+               sk->sk_data_ready(sk);
+               err = 0;
        }
-       sock_put(sk);
        return err;
 }
 
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 {
        struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
        struct sock *sk;
-       struct sk_buff *skb2 = NULL;
        int err = -ESRCH;
 
        /* XXX Do we need something like netlink_overrun?  I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                 * socket.
                 */
                if (pfk->promisc)
-                       pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+                       pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
                /* the exact target will be processed later */
                if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
                                continue;
                }
 
-               err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+               err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
 
                /* Error is cleared after successful sending to at least one
                 * registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
        rcu_read_unlock();
 
        if (one_sk != NULL)
-               err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+               err = pfkey_broadcast_one(skb, allocation, one_sk);
 
-       kfree_skb(skb2);
        kfree_skb(skb);
        return err;
 }
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m
        if (proto == 0)
                return -EINVAL;
 
-       err = xfrm_state_flush(net, proto, true);
+       err = xfrm_state_flush(net, proto, true, false);
        err2 = unicast_flush_resp(sk, hdr);
        if (err || err2) {
                if (err == -ESRCH) /* empty table - go quietly */
index 2493c74c2d3789a2e504f91b53fc93a02a7234c0..96496b2c1670591d69207b7e6479497ff1e5ba3f 100644 (file)
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
                      BSS_CHANGED_P2P_PS |
                      BSS_CHANGED_TXPOWER;
        int err;
+       int prev_beacon_int;
 
        old = sdata_dereference(sdata->u.ap.beacon, sdata);
        if (old)
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
 
        sdata->needed_rx_chains = sdata->local->rx_chains;
 
+       prev_beacon_int = sdata->vif.bss_conf.beacon_int;
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
        if (params->he_cap)
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        if (!err)
                ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
        mutex_unlock(&local->mtx);
-       if (err)
+       if (err) {
+               sdata->vif.bss_conf.beacon_int = prev_beacon_int;
                return err;
+       }
 
        /*
         * Apply control port protocol, this allows us to
index 87a7299267340675be3bd910183bdd84471deeaf..977dea436ee89dda59c4ba8e3e03114759dca323 100644 (file)
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
         * We need a bit of data queued to build aggregates properly, so
         * instruct the TCP stack to allow more than a single ms of data
         * to be queued in the stack. The value is a bit-shift of 1
-        * second, so 8 is ~4ms of queued data. Only affects local TCP
+        * second, so 7 is ~8ms of queued data. Only affects local TCP
         * sockets.
         * This is the default, anyhow - drivers may need to override it
         * for local reasons (longer buffers, longer completion time, or
         * similar).
         */
-       local->hw.tx_sk_pacing_shift = 8;
+       local->hw.tx_sk_pacing_shift = 7;
 
        /* set up some defaults */
        local->hw.queues = 1;
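The shift applies to a one-second budget: the stack caps locally queued TCP data at roughly sk_pacing_rate >> sk_pacing_shift bytes, i.e. the amount sent in (1 second >> shift) at the current pacing rate. A minimal sketch of the arithmetic, with a hypothetical helper name:

static inline unsigned long tsq_budget_bytes(unsigned long pacing_rate_Bps,
					     unsigned int shift)
{
	/* bytes transmitted in (1 second >> shift) at the pacing rate:
	 * shift == 8  ->  rate / 256, ~3.9 ms of data
	 * shift == 7  ->  rate / 128, ~7.8 ms of data
	 */
	return pacing_rate_Bps >> shift;
}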
index cad6592c52a11dcabb7f3fd09878a3515a64095d..2ec7011a4d079840fa1ca23fe76124e170e57362 100644 (file)
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
  * @dst: mesh path destination mac address
  * @mpp: mesh proxy mac address
  * @rhash: rhashtable list pointer
+ * @walk_list: linkage into the table's list of all mesh_path objects
  * @gate_list: list pointer for known gates list
  * @sdata: mesh subif
  * @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
        u8 dst[ETH_ALEN];
        u8 mpp[ETH_ALEN];       /* used for MPP or MAP */
        struct rhash_head rhash;
+       struct hlist_node walk_list;
        struct hlist_node gate_list;
        struct ieee80211_sub_if_data *sdata;
        struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
  * gate's mpath may or may not be resolved and active.
  * @gates_lock: protects updates to known_gates
  * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
  * @entries: number of entries in the table
  */
 struct mesh_table {
        struct hlist_head known_gates;
        spinlock_t gates_lock;
        struct rhashtable rhead;
+       struct hlist_head walk_head;
+       spinlock_t walk_lock;
        atomic_t entries;               /* Up to MAX_MESH_NEIGHBOURS */
 };
 
index a5125624a76dce205b28e916a505faae765539ae..88a6d5e18ccc9e13d20af8736f82ed6fa42a8ce5 100644 (file)
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
                return NULL;
 
        INIT_HLIST_HEAD(&newtbl->known_gates);
+       INIT_HLIST_HEAD(&newtbl->walk_head);
        atomic_set(&newtbl->entries,  0);
        spin_lock_init(&newtbl->gates_lock);
+       spin_lock_init(&newtbl->walk_lock);
 
        return newtbl;
 }
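The walk_head/walk_lock pair initialised above lets the code iterate the table over a plain linked list instead of an rhashtable walker: writers mutate the list under the spinlock, readers traverse it under RCU. A minimal sketch of that pattern with generic names (illustrative only, not the mac80211 code):

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct item {
	struct hlist_node walk_list;
};

static HLIST_HEAD(walk_head);
static DEFINE_SPINLOCK(walk_lock);

static void item_add(struct item *it)
{
	spin_lock_bh(&walk_lock);
	hlist_add_head_rcu(&it->walk_list, &walk_head);
	spin_unlock_bh(&walk_lock);
}

static void item_del(struct item *it)
{
	spin_lock_bh(&walk_lock);
	hlist_del_rcu(&it->walk_list);
	spin_unlock_bh(&walk_lock);
	/* actual freeing must wait for an RCU grace period */
}

static void item_walk(void (*fn)(struct item *))
{
	struct item *it;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, &walk_head, walk_list)
		fn(it);
	rcu_read_unlock();
}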
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 static struct mesh_path *
 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 {
-       int i = 0, ret;
-       struct mesh_path *mpath = NULL;
-       struct rhashtable_iter iter;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return NULL;
-
-       rhashtable_walk_start(&iter);
+       int i = 0;
+       struct mesh_path *mpath;
 
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
                if (i++ == idx)
                        break;
        }
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
 
-       if (IS_ERR(mpath) || !mpath)
+       if (!mpath)
                return NULL;
 
        if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                return ERR_PTR(-ENOMEM);
 
        tbl = sdata->u.mesh.mesh_paths;
+       spin_lock_bh(&tbl->walk_lock);
        do {
                ret = rhashtable_lookup_insert_fast(&tbl->rhead,
                                                    &new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                        mpath = rhashtable_lookup_fast(&tbl->rhead,
                                                       dst,
                                                       mesh_rht_params);
-
+               else if (!ret)
+                       hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
        } while (unlikely(ret == -EEXIST && !mpath));
+       spin_unlock_bh(&tbl->walk_lock);
 
-       if (ret && ret != -EEXIST)
-               return ERR_PTR(ret);
-
-       /* At this point either new_mpath was added, or we found a
-        * matching entry already in the table; in the latter case
-        * free the unnecessary new entry.
-        */
-       if (ret == -EEXIST) {
+       if (ret) {
                kfree(new_mpath);
+
+               if (ret != -EEXIST)
+                       return ERR_PTR(ret);
+
                new_mpath = mpath;
        }
+
        sdata->u.mesh.mesh_paths_generation++;
        return new_mpath;
 }
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        tbl = sdata->u.mesh.mpp_paths;
+
+       spin_lock_bh(&tbl->walk_lock);
        ret = rhashtable_lookup_insert_fast(&tbl->rhead,
                                            &new_mpath->rhash,
                                            mesh_rht_params);
+       if (!ret)
+               hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+       spin_unlock_bh(&tbl->walk_lock);
+
+       if (ret)
+               kfree(new_mpath);
 
        sdata->u.mesh.mpp_paths_generation++;
        return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
        struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
 
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
                if (rcu_access_pointer(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
                                WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
                }
        }
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       rcu_read_unlock();
 }
 
 static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
 
 static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
+       hlist_del_rcu(&mpath->walk_list);
        rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
        mesh_path_free_rcu(tbl, mpath);
 }
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       struct hlist_node *n;
 
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if (rcu_access_pointer(mpath->next_hop) == sta)
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 {
        struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       struct hlist_node *n;
 
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if (ether_addr_equal(mpath->mpp, proxy))
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 static void table_flush_by_iface(struct mesh_table *tbl)
 {
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
-
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
+       struct hlist_node *n;
 
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 /**
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl,
 {
        struct mesh_path *mpath;
 
-       rcu_read_lock();
+       spin_lock_bh(&tbl->walk_lock);
        mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
        if (!mpath) {
-               rcu_read_unlock();
+               spin_unlock_bh(&tbl->walk_lock);
                return -ENXIO;
        }
 
        __mesh_path_del(tbl, mpath);
-       rcu_read_unlock();
+       spin_unlock_bh(&tbl->walk_lock);
        return 0;
 }
 
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
                          struct mesh_table *tbl)
 {
        struct mesh_path *mpath;
-       struct rhashtable_iter iter;
-       int ret;
+       struct hlist_node *n;
 
-       ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
-       if (ret)
-               return;
-
-       rhashtable_walk_start(&iter);
-
-       while ((mpath = rhashtable_walk_next(&iter))) {
-               if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
-                       continue;
-               if (IS_ERR(mpath))
-                       break;
+       spin_lock_bh(&tbl->walk_lock);
+       hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        __mesh_path_del(tbl, mpath);
        }
-
-       rhashtable_walk_stop(&iter);
-       rhashtable_walk_exit(&iter);
+       spin_unlock_bh(&tbl->walk_lock);
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
index bb4d71efb6fb87f462887312ed631a4f11a16148..c2a6da5d80da8868c9b973a987538bce9157e08f 100644 (file)
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        u16 ac, q, hdrlen;
+       int tailroom = 0;
 
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        if (!ifmsh->mshcfg.dot11MeshForwarding)
                goto out;
 
+       if (sdata->crypto_tx_tailroom_needed_cnt)
+               tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
        fwd_skb = skb_copy_expand(skb, local->tx_headroom +
-                                      sdata->encrypt_headroom, 0, GFP_ATOMIC);
+                                      sdata->encrypt_headroom,
+                                 tailroom, GFP_ATOMIC);
        if (!fwd_skb)
                goto out;
 
index 7d55d4c04088710cf0471959927f861dacdfbb10..fa763e2e50ec22f04eb0a35a7726551315787ca1 100644 (file)
@@ -1838,6 +1838,9 @@ static int rtm_to_route_config(struct sk_buff *skb,
                                goto errout;
                        break;
                }
+               case RTA_GATEWAY:
+                       NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+                       goto errout;
                case RTA_VIA:
                {
                        if (nla_get_via(nla, &cfg->rc_via_alen,
index 86afacb07e5fd6f3849b5339d3315dfa642fbe3b..ac8d848d7624fcea1a809e6b4e679791f80121ea 100644 (file)
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
        struct ip_vs_dest *dest;
        unsigned int atype, i;
-       int ret = 0;
 
        EnterFunction(2);
 
 #ifdef CONFIG_IP_VS_IPV6
        if (udest->af == AF_INET6) {
+               int ret;
+
                atype = ipv6_addr_type(&udest->addr.in6);
                if ((!(atype & IPV6_ADDR_UNICAST) ||
                        atype & IPV6_ADDR_LINKLOCAL) &&
index 5a92f23f179fe0242016c7ec1deb6e6a5b4fbf4f..4893f248dfdc9f2e959c0b09f2fc7be336bbc53d 100644 (file)
@@ -313,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
        int err;
 
        list_for_each_entry(rule, &ctx->chain->rules, list) {
+               if (!nft_is_active_next(ctx->net, rule))
+                       continue;
+
                err = nft_delrule(ctx, rule);
                if (err < 0)
                        return err;
index ea7c67050792c9093c28b397e5ae164a570a0e33..ee3e5b6471a69ec0100ea9099dc2e8be24a4b024 100644 (file)
@@ -903,7 +903,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
                    (state == 0 && (byte & bitmask) == 0))
                        return bit_spot;
 
-               bit_spot++;
+               if (++bit_spot >= bitmap_len)
+                       return -1;
                bitmask >>= 1;
                if (bitmask == 0) {
                        byte = bitmap[++byte_offset];
index 6a196e438b6c03d4c86e0a8a78af1c496a7e599b..d1fc019e932e0c92caccab7d2ef3dc43e12880f0 100644 (file)
@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
                                                      sock->service_name,
                                                      sock->service_name_len,
                                                      &service_name_tlv_length);
+               if (!service_name_tlv) {
+                       err = -ENOMEM;
+                       goto error_tlv;
+               }
                size += service_name_tlv_length;
        }
 
@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
 
        miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
                                      &miux_tlv_length);
+       if (!miux_tlv) {
+               err = -ENOMEM;
+               goto error_tlv;
+       }
        size += miux_tlv_length;
 
        rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+       if (!rw_tlv) {
+               err = -ENOMEM;
+               goto error_tlv;
+       }
        size += rw_tlv_length;
 
        pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
 
        miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
                                      &miux_tlv_length);
+       if (!miux_tlv) {
+               err = -ENOMEM;
+               goto error_tlv;
+       }
        size += miux_tlv_length;
 
        rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+       if (!rw_tlv) {
+               err = -ENOMEM;
+               goto error_tlv;
+       }
        size += rw_tlv_length;
 
        skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
index ef4026a23e802eba74c3269617b8aeb4f68e97ca..4fa015208aab1c5e59e291cb55dd8e296c02eeca 100644 (file)
@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
 
 static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
 {
-       u8 *gb_cur, *version_tlv, version, version_length;
-       u8 *lto_tlv, lto_length;
-       u8 *wks_tlv, wks_length;
-       u8 *miux_tlv, miux_length;
+       u8 *gb_cur, version, version_length;
+       u8 lto_length, wks_length, miux_length;
+       u8 *version_tlv = NULL, *lto_tlv = NULL,
+          *wks_tlv = NULL, *miux_tlv = NULL;
        __be16 wks = cpu_to_be16(local->local_wks);
        u8 gb_len = 0;
        int ret = 0;
@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        version = LLCP_VERSION_11;
        version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
                                         1, &version_length);
+       if (!version_tlv) {
+               ret = -ENOMEM;
+               goto out;
+       }
        gb_len += version_length;
 
        lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+       if (!lto_tlv) {
+               ret = -ENOMEM;
+               goto out;
+       }
        gb_len += lto_length;
 
        pr_debug("Local wks 0x%lx\n", local->local_wks);
        wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+       if (!wks_tlv) {
+               ret = -ENOMEM;
+               goto out;
+       }
        gb_len += wks_length;
 
        miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
                                      &miux_length);
+       if (!miux_tlv) {
+               ret = -ENOMEM;
+               goto out;
+       }
        gb_len += miux_length;
 
        gb_len += ARRAY_SIZE(llcp_magic);
index 9fc76b19cd3c17289604976f61accfb8013eabd6..db34735403035bd327be48658a58d2d553e06491 100644 (file)
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
        ph->utid = 0;
        ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->data[0] = code;
+       ph->error_code = code;
        return pn_skb_send(sk, skb, NULL);
 }
 
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
        ph->utid = id; /* whatever */
        ph->message_id = id;
        ph->pipe_handle = pn->pipe_handle;
-       ph->data[0] = code;
+       ph->error_code = code;
        return pn_skb_send(sk, skb, NULL);
 }
 
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
        struct pnpipehdr *ph;
        struct sockaddr_pn dst;
        u8 data[4] = {
-               oph->data[0], /* PEP type */
+               oph->pep_type, /* PEP type */
                code, /* error code, at an unusual offset */
                PAD, PAD,
        };
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
        ph->utid = oph->utid;
        ph->message_id = PNS_PEP_CTRL_RESP;
        ph->pipe_handle = oph->pipe_handle;
-       ph->data[0] = oph->data[1]; /* CTRL id */
+       ph->data0 = oph->data[0]; /* CTRL id */
 
        pn_skb_get_src_sockaddr(oskb, &dst);
        return pn_skb_send(sk, skb, &dst);
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
                return -EINVAL;
 
        hdr = pnp_hdr(skb);
-       if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+       if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
                net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
-                                   (unsigned int)hdr->data[0]);
+                                   (unsigned int)hdr->pep_type);
                return -EOPNOTSUPP;
        }
 
-       switch (hdr->data[1]) {
+       switch (hdr->data[0]) {
        case PN_PEP_IND_FLOW_CONTROL:
                switch (pn->tx_fc) {
                case PN_LEGACY_FLOW_CONTROL:
-                       switch (hdr->data[4]) {
+                       switch (hdr->data[3]) {
                        case PEP_IND_BUSY:
                                atomic_set(&pn->tx_credits, 0);
                                break;
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
                        }
                        break;
                case PN_ONE_CREDIT_FLOW_CONTROL:
-                       if (hdr->data[4] == PEP_IND_READY)
+                       if (hdr->data[3] == PEP_IND_READY)
                                atomic_set(&pn->tx_credits, wake = 1);
                        break;
                }
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
        case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
                if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
                        break;
-               atomic_add(wake = hdr->data[4], &pn->tx_credits);
+               atomic_add(wake = hdr->data[3], &pn->tx_credits);
                break;
 
        default:
                net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
-                                   (unsigned int)hdr->data[1]);
+                                   (unsigned int)hdr->data[0]);
                return -EOPNOTSUPP;
        }
        if (wake)
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
 {
        struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *hdr = pnp_hdr(skb);
-       u8 n_sb = hdr->data[0];
+       u8 n_sb = hdr->data0;
 
        pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
        __skb_pull(skb, sizeof(*hdr));
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
                return -ECONNREFUSED;
 
        /* Parse sub-blocks */
-       n_sb = hdr->data[4];
+       n_sb = hdr->data[3];
        while (n_sb > 0) {
                u8 type, buf[6], len = sizeof(buf);
                const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk)
        ph->utid = 0;
        ph->message_id = PNS_PIPE_REMOVE_REQ;
        ph->pipe_handle = pn->pipe_handle;
-       ph->data[0] = PAD;
+       ph->data0 = PAD;
        return pn_skb_send(sk, skb, NULL);
 }
 
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
        peer_type = hdr->other_pep_type << 8;
 
        /* Parse sub-blocks (options) */
-       n_sb = hdr->data[4];
+       n_sb = hdr->data[3];
        while (n_sb > 0) {
                u8 type, buf[1], len = sizeof(buf);
                const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
        ph->utid = 0;
        if (pn->aligned) {
                ph->message_id = PNS_PIPE_ALIGNED_DATA;
-               ph->data[0] = 0; /* padding */
+               ph->data0 = 0; /* padding */
        } else
                ph->message_id = PNS_PIPE_DATA;
        ph->pipe_handle = pn->pipe_handle;
index 8af6c11d2482ac53e6520691c85ff291a4704a81..faa1addf89b3b1c32ce493bc16d62ee60671533a 100644 (file)
@@ -199,8 +199,7 @@ err3:
 err2:
        kfree(tname);
 err1:
-       if (ret == ACT_P_CREATED)
-               tcf_idr_release(*a, bind);
+       tcf_idr_release(*a, bind);
        return err;
 }
 
index 64dba3708fce2e096872061bd4405344a957be4b..cfceed28c3331e466782150c7656efda759ef097 100644 (file)
@@ -189,8 +189,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               if (ret == ACT_P_CREATED)
-                       tcf_idr_release(*a, bind);
+               tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
 
index 8b43fe0130f73039e3f1802701df910e079fc4ba..3f943de9a2c975df258e5eb6ae5688b016f0b5d6 100644 (file)
@@ -377,7 +377,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
        return ret;
 
 release_tun_meta:
-       dst_release(&metadata->dst);
+       if (metadata)
+               dst_release(&metadata->dst);
 
 err_out:
        if (exists)
index 75046ec7214449c631c38eaab5e4a51644cfa0e5..cc9d8133afcdb7fe1b6b4c35f9d96d636ea11035 100644 (file)
@@ -447,6 +447,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        int nb = 0;
        int count = 1;
        int rc = NET_XMIT_SUCCESS;
+       int rc_drop = NET_XMIT_DROP;
 
        /* Do not fool qdisc_drop_all() */
        skb->prev = NULL;
@@ -486,6 +487,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                q->duplicate = 0;
                rootq->enqueue(skb2, rootq, to_free);
                q->duplicate = dupsave;
+               rc_drop = NET_XMIT_SUCCESS;
        }
 
        /*
@@ -498,7 +500,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                if (skb_is_gso(skb)) {
                        segs = netem_segment(skb, sch, to_free);
                        if (!segs)
-                               return NET_XMIT_DROP;
+                               return rc_drop;
                } else {
                        segs = skb;
                }
@@ -521,8 +523,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        1<<(prandom_u32() % 8);
        }
 
-       if (unlikely(sch->q.qlen >= sch->limit))
-               return qdisc_drop_all(skb, sch, to_free);
+       if (unlikely(sch->q.qlen >= sch->limit)) {
+               qdisc_drop_all(skb, sch, to_free);
+               return rc_drop;
+       }
 
        qdisc_qstats_backlog_inc(sch, skb);
 
index 64bef313d43643757140ffa850724423fd801cd5..5cb7c1ff97e92e3ec586f2f23b480f5236f34296 100644 (file)
@@ -192,7 +192,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        if (unlikely(!max_data)) {
                max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
                                               sctp_datachk_len(&asoc->stream));
-               pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%Zu)",
+               pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
                                    __func__, asoc, max_data);
        }
 
index 033696e6f74fbea97392059d243a078af925e832..ad158d311ffaea073f0c13d76d20f4756ceec879 100644 (file)
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
 
        /* When a data chunk is sent, reset the heartbeat interval.  */
        expires = jiffies + sctp_transport_timeout(transport);
-       if (time_before(transport->hb_timer.expires, expires) &&
+       if ((time_before(transport->hb_timer.expires, expires) ||
+            !timer_pending(&transport->hb_timer)) &&
            !mod_timer(&transport->hb_timer,
                       expires + prandom_u32_max(transport->rto)))
                sctp_transport_hold(transport);
index 5721416d060534ff294be6db270e699fd942ba7f..adbdf195eb085f5507444340fb1f8ddcb69548d5 100644 (file)
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg {           /* Connection Data Control message */
 } __aligned(8);
 
 enum smc_urg_state {
-       SMC_URG_VALID,                  /* data present */
-       SMC_URG_NOTYET,                 /* data pending */
-       SMC_URG_READ                    /* data was already read */
+       SMC_URG_VALID   = 1,                    /* data present */
+       SMC_URG_NOTYET  = 2,                    /* data pending */
+       SMC_URG_READ    = 3,                    /* data was already read */
 };
 
 struct smc_connection {
index d80d87a395ea99c58624feb204c896e697140bfa..320f51b22b1903c95f5d5c25127f9785a8162f53 100644 (file)
@@ -577,6 +577,7 @@ static void __sock_release(struct socket *sock, struct inode *inode)
                if (inode)
                        inode_lock(inode);
                sock->ops->release(sock);
+               sock->sk = NULL;
                if (inode)
                        inode_unlock(inode);
                sock->ops = NULL;
index 1217c90a363b75f7d74ab45b1f1cd6c538a872ce..70343ac448b182eec10b648372dc9109ffe78ce1 100644 (file)
@@ -379,16 +379,18 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
 
 #define tipc_wait_for_cond(sock_, timeo_, condition_)                         \
 ({                                                                             \
+       DEFINE_WAIT_FUNC(wait_, woken_wake_function);                          \
        struct sock *sk_;                                                      \
        int rc_;                                                               \
                                                                               \
        while ((rc_ = !(condition_))) {                                        \
-               DEFINE_WAIT_FUNC(wait_, woken_wake_function);                  \
+               /* coupled with smp_wmb() in tipc_sk_proto_rcv() */            \
+               smp_rmb();                                                     \
                sk_ = (sock_)->sk;                                             \
                rc_ = tipc_sk_sock_err((sock_), timeo_);                       \
                if (rc_)                                                       \
                        break;                                                 \
-               prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE);    \
+               add_wait_queue(sk_sleep(sk_), &wait_);                         \
                release_sock(sk_);                                             \
                *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
                sched_annotate_sleep();                                        \
@@ -1677,7 +1679,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 {
        struct sock *sk = sock->sk;
-       DEFINE_WAIT(wait);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
        long timeo = *timeop;
        int err = sock_error(sk);
 
@@ -1685,15 +1687,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                return err;
 
        for (;;) {
-               prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
                        if (sk->sk_shutdown & RCV_SHUTDOWN) {
                                err = -ENOTCONN;
                                break;
                        }
+                       add_wait_queue(sk_sleep(sk), &wait);
                        release_sock(sk);
-                       timeo = schedule_timeout(timeo);
+                       timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+                       sched_annotate_sleep();
                        lock_sock(sk);
+                       remove_wait_queue(sk_sleep(sk), &wait);
                }
                err = 0;
                if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1709,7 +1713,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
                if (err)
                        break;
        }
-       finish_wait(sk_sleep(sk), &wait);
        *timeop = timeo;
        return err;
 }
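wait_woken() records wakeups in the wait entry itself (WQ_FLAG_WOKEN), so a wakeup that fires after the condition check but before the task sleeps is not lost, a race the old prepare_to_wait()/schedule_timeout() sequence could hit once the socket lock is dropped. A condensed sketch of the idiom, with a generic condition:

DEFINE_WAIT_FUNC(wait, woken_wake_function);

add_wait_queue(sk_sleep(sk), &wait);
while (!condition_met(sk)) {		/* condition_met() is a placeholder */
	release_sock(sk);
	/* returns immediately if a wakeup already hit this entry */
	timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
	sched_annotate_sleep();
	lock_sock(sk);
	if (!timeo || signal_pending(current))
		break;
}
remove_wait_queue(sk_sleep(sk), &wait);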
@@ -1982,6 +1985,8 @@ static void tipc_sk_proto_rcv(struct sock *sk,
                return;
        case SOCK_WAKEUP:
                tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+               /* coupled with smp_rmb() in tipc_wait_for_cond() */
+               smp_wmb();
                tsk->cong_link_cnt--;
                wakeup = true;
                break;
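This smp_wmb() pairs with the smp_rmb() added to tipc_wait_for_cond() above: the stores made before the barrier (the congested link removed from cong_links) must be visible before a waiter observes the decremented cong_link_cnt and re-evaluates its condition. The generic shape of the pairing, with illustrative names:

static int payload;
static int flag;

static void writer(void)
{
	payload = 42;		/* publish the data... */
	smp_wmb();		/* ...strictly before the flag */
	WRITE_ONCE(flag, 1);
}

static int reader(void)
{
	if (!READ_ONCE(flag))
		return -1;	/* not published yet */
	smp_rmb();		/* order the flag load before the data load */
	return payload;		/* guaranteed to read 42 */
}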
index 74d1eed7cbd4ea1c641db2fa9a104cc648715d24..a95d479caeea022df4ae98c2267ad4b3427d751d 100644 (file)
@@ -890,7 +890,7 @@ retry:
        addr->hash ^= sk->sk_type;
 
        __unix_remove_socket(sk);
-       u->addr = addr;
+       smp_store_release(&u->addr, addr);
        __unix_insert_socket(&unix_socket_table[addr->hash], sk);
        spin_unlock(&unix_table_lock);
        err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = 0;
        __unix_remove_socket(sk);
-       u->addr = addr;
+       smp_store_release(&u->addr, addr);
        __unix_insert_socket(list, sk);
 
 out_unlock:
@@ -1331,15 +1331,29 @@ restart:
        RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);
 
-       /* copy address information from listening to new sock*/
-       if (otheru->addr) {
-               refcount_inc(&otheru->addr->refcnt);
-               newu->addr = otheru->addr;
-       }
+       /* copy address information from listening to new sock
+        *
+        * The contents of *(otheru->addr) and otheru->path
+        * are seen fully set up here, since we have found
+        * otheru in hash under unix_table_lock.  Insertion
+        * into the hash chain we'd found it in had been done
+        * in an earlier critical section protected by unix_table_lock,
+        * the same one where we'd set *(otheru->addr) contents,
+        * as well as otheru->path and otheru->addr itself.
+        *
+        * Using smp_store_release() here to set newu->addr
+        * is enough to make those stores, as well as stores
+        * to newu->path visible to anyone who gets newu->addr
+        * by smp_load_acquire().  IOW, the same guarantees
+        * as for unix_sock instances bound in unix_bind() or
+        * in unix_autobind().
+        */
        if (otheru->path.dentry) {
                path_get(&otheru->path);
                newu->path = otheru->path;
        }
+       refcount_inc(&otheru->addr->refcnt);
+       smp_store_release(&newu->addr, otheru->addr);
 
        /* Set credentials */
        copy_peercred(sk, other);
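smp_store_release()/smp_load_acquire() implement lock-free publication: every store made before the release is visible to any reader whose acquire sees the new pointer, which is what lets unix_getname() and unix_copy_addr() below drop unix_state_lock(). A minimal sketch of the idiom with hypothetical names:

struct blob {
	int len;
	char name[32];
};

static struct blob *published;

static void publish(struct blob *b)
{
	b->len = 5;
	memcpy(b->name, "hello", 6);		/* initialise everything first... */
	smp_store_release(&published, b);	/* ...then expose the pointer */
}

static void consume(void)
{
	struct blob *b = smp_load_acquire(&published);

	if (b)		/* if we see b, we see its initialised fields too */
		pr_info("%.*s\n", b->len, b->name);
}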
@@ -1453,7 +1467,7 @@ out:
 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
 {
        struct sock *sk = sock->sk;
-       struct unix_sock *u;
+       struct unix_address *addr;
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
        int err = 0;
 
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
                sock_hold(sk);
        }
 
-       u = unix_sk(sk);
-       unix_state_lock(sk);
-       if (!u->addr) {
+       addr = smp_load_acquire(&unix_sk(sk)->addr);
+       if (!addr) {
                sunaddr->sun_family = AF_UNIX;
                sunaddr->sun_path[0] = 0;
                err = sizeof(short);
        } else {
-               struct unix_address *addr = u->addr;
-
                err = addr->len;
                memcpy(sunaddr, addr->name, addr->len);
        }
-       unix_state_unlock(sk);
        sock_put(sk);
 out:
        return err;
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
 
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
-       struct unix_sock *u = unix_sk(sk);
+       struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
-       if (u->addr) {
-               msg->msg_namelen = u->addr->len;
-               memcpy(msg->msg_name, u->addr->name, u->addr->len);
+       if (addr) {
+               msg->msg_namelen = addr->len;
+               memcpy(msg->msg_name, addr->name, addr->len);
        }
 }
 
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk)
        if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       unix_state_lock(sk);
+       if (!smp_load_acquire(&unix_sk(sk)->addr))
+               return -ENOENT;
+
        path = unix_sk(sk)->path;
-       if (!path.dentry) {
-               unix_state_unlock(sk);
+       if (!path.dentry)
                return -ENOENT;
-       }
 
        path_get(&path);
-       unix_state_unlock(sk);
 
        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
                        (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
                        sock_i_ino(s));
 
-               if (u->addr) {
+               if (u->addr) {  // under unix_table_lock here
                        int i, len;
                        seq_putc(seq, ' ');
 
index 384c84e83462e51d24e469515f4b52f8dcf55877..3183d9b8ab33232c6f42686677c056a58bc5d2fa 100644 (file)
@@ -10,7 +10,8 @@
 
 static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
 {
-       struct unix_address *addr = unix_sk(sk)->addr;
+       /* might or might not have unix_table_lock */
+       struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
 
        if (!addr)
                return 0;
index ec3a828672ef53991146bf94a2266932a5f25872..eff31348e20b121ed2ca740c9d2c89e26da316d9 100644 (file)
@@ -679,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
        int len, i, rc = 0;
 
-       if (!sock_flag(sk, SOCK_ZAPPED) ||
-           addr_len != sizeof(struct sockaddr_x25) ||
+       if (addr_len != sizeof(struct sockaddr_x25) ||
            addr->sx25_family != AF_X25) {
                rc = -EINVAL;
                goto out;
@@ -699,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        }
 
        lock_sock(sk);
-       x25_sk(sk)->source_addr = addr->sx25_addr;
-       x25_insert_socket(sk);
-       sock_reset_flag(sk, SOCK_ZAPPED);
+       if (sock_flag(sk, SOCK_ZAPPED)) {
+               x25_sk(sk)->source_addr = addr->sx25_addr;
+               x25_insert_socket(sk);
+               sock_reset_flag(sk, SOCK_ZAPPED);
+       } else {
+               rc = -EINVAL;
+       }
        release_sock(sk);
        SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
 out:
index d4de871e7d4d7fa8f2c426559e92313c33d2655d..37e1fe18076926563f827d2124448de42a2e29c7 100644 (file)
@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
        return 0;
 
 err_unreg_umem:
-       xdp_clear_umem_at_qid(dev, queue_id);
        if (!force_zc)
                err = 0; /* fallback to copy mode */
+       if (err)
+               xdp_clear_umem_at_qid(dev, queue_id);
 out_rtnl_unlock:
        rtnl_unlock();
        return err;
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem)
        if (!umem->pgs)
                return -ENOMEM;
 
-       down_write(&current->mm->mmap_sem);
-       npgs = get_user_pages(umem->address, umem->npgs,
-                             gup_flags, &umem->pgs[0], NULL);
-       up_write(&current->mm->mmap_sem);
+       down_read(&current->mm->mmap_sem);
+       npgs = get_user_pages_longterm(umem->address, umem->npgs,
+                                      gup_flags, &umem->pgs[0], NULL);
+       up_read(&current->mm->mmap_sem);
 
        if (npgs != umem->npgs) {
                if (npgs >= 0) {
index a03268454a2762d12b9dec3e0d3858869e34b742..85e4fe4f18cced0c546390390561c25e5544ed0c 100644 (file)
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock)
 
        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
-       xdp_put_umem(xs->umem);
 
        sock_orphan(sk);
        sock->sk = NULL;
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
                if (!umem)
                        return -EINVAL;
 
+               /* Matches the smp_wmb() in XDP_UMEM_REG */
+               smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
        if (!q)
                return -EINVAL;
 
+       /* Matches the smp_wmb() in xsk_init_queue */
+       smp_rmb();
        qpg = virt_to_head_page(q->ring);
        if (size > (PAGE_SIZE << compound_order(qpg)))
                return -EINVAL;
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = {
        .sendpage       = sock_no_sendpage,
 };
 
+static void xsk_destruct(struct sock *sk)
+{
+       struct xdp_sock *xs = xdp_sk(sk);
+
+       if (!sock_flag(sk, SOCK_DEAD))
+               return;
+
+       xdp_put_umem(xs->umem);
+
+       sk_refcnt_debug_dec(sk);
+}
+
 static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
 {
@@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 
        sk->sk_family = PF_XDP;
 
+       sk->sk_destruct = xsk_destruct;
+       sk_refcnt_debug_inc(sk);
+
        sock_set_flag(sk, SOCK_RCU_FREE);
 
        xs = xdp_sk(sk);
index 6be8c7df15bb20f5a641a6922110b63f80652dca..dbb3c1945b5c911b5933f60b284015a91524c832 100644 (file)
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb)
        int ifindex;
        struct xfrm_if *xi;
 
-       if (!skb->dev)
+       if (!secpath_exists(skb) || !skb->dev)
                return NULL;
 
-       xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id);
+       xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);
        ifindex = skb->dev->ifindex;
 
        for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
index ba0a4048c846bbbbde6e0d3320f6d4cfa46da424..8d1a898d0ba562a25e8d42b1692d62ba766b7353 100644 (file)
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
 
        if (ifcb) {
                xi = ifcb->decode_session(skb);
-               if (xi)
+               if (xi) {
                        if_id = xi->p.if_id;
+                       net = xi->net;
+               }
        }
        rcu_read_unlock();
 
index 23c92891758a829e06dd776baf7ca340dec67b9f..1bb971f46fc6f9096f59c99740d6e832276c3b34 100644 (file)
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_free);
 
-static void xfrm_state_gc_destroy(struct xfrm_state *x)
+static void ___xfrm_state_destroy(struct xfrm_state *x)
 {
        tasklet_hrtimer_cancel(&x->mtimer);
        del_timer_sync(&x->rtimer);
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work)
        synchronize_rcu();
 
        hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
-               xfrm_state_gc_destroy(x);
+               ___xfrm_state_destroy(x);
 }
 
 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
 }
 EXPORT_SYMBOL(xfrm_state_alloc);
 
-void __xfrm_state_destroy(struct xfrm_state *x)
+void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
 {
        WARN_ON(x->km.state != XFRM_STATE_DEAD);
 
-       spin_lock_bh(&xfrm_state_gc_lock);
-       hlist_add_head(&x->gclist, &xfrm_state_gc_list);
-       spin_unlock_bh(&xfrm_state_gc_lock);
-       schedule_work(&xfrm_state_gc_work);
+       if (sync) {
+               synchronize_rcu();
+               ___xfrm_state_destroy(x);
+       } else {
+               spin_lock_bh(&xfrm_state_gc_lock);
+               hlist_add_head(&x->gclist, &xfrm_state_gc_list);
+               spin_unlock_bh(&xfrm_state_gc_lock);
+               schedule_work(&xfrm_state_gc_work);
+       }
 }
 EXPORT_SYMBOL(__xfrm_state_destroy);
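The sync flag picks between two teardown strategies: inline destruction after synchronize_rcu() has flushed all in-flight RCU readers, or handing the state to the deferred GC worker. Net-namespace teardown needs the former so nothing outlives the namespace. A reduced sketch of the split, with generic names:

static void obj_destroy(struct obj *o, bool sync)
{
	if (sync) {
		synchronize_rcu();	/* wait out every RCU reader */
		obj_free(o);		/* then free inline */
	} else {
		spin_lock_bh(&gc_lock);
		hlist_add_head(&o->gclist, &gc_list);
		spin_unlock_bh(&gc_lock);
		schedule_work(&gc_work);	/* worker frees later */
	}
}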
 
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool
 }
 #endif
 
-int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
+int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
 {
        int i, err = 0, cnt = 0;
 
@@ -730,7 +735,10 @@ restart:
                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
                                                        task_valid);
-                               xfrm_state_put(x);
+                               if (sync)
+                                       xfrm_state_put_sync(x);
+                               else
+                                       xfrm_state_put(x);
                                if (!err)
                                        cnt++;
 
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
-               xfrm_state_put(t);
+               xfrm_state_put_sync(t);
                x->tunnel = NULL;
        }
 }
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net)
        unsigned int sz;
 
        flush_work(&net->xfrm.state_hash_work);
-       xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
        flush_work(&xfrm_state_gc_work);
+       xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true);
 
        WARN_ON(!list_empty(&net->xfrm.state_all));
 
index c6d26afcf89df4c9327acf3074d042cbbc8eba62..a131f9ff979e1b64015ade91942cf1ce88eee15c 100644 (file)
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_usersa_flush *p = nlmsg_data(nlh);
        int err;
 
-       err = xfrm_state_flush(net, p->proto, true);
+       err = xfrm_state_flush(net, p->proto, true, false);
        if (err) {
                if (err == -ESRCH) /* empty table */
                        return 0;
index 25c259df8ffab1b108994ac12d78b35f97b65403..6deabedc67fc891484be732b7591d4301bf156e0 100644 (file)
@@ -26,7 +26,7 @@ else
        CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
         $(call cc-param,asan-globals=1) \
         $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-        $(call cc-param,asan-stack=1) \
+        $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
         $(call cc-param,asan-use-after-scope=1) \
         $(call cc-param,asan-instrument-allocas=1)
 endif
index 77cebad0474e5393b7c20c1d65ec22b969df29f5..f75e7bda48898e73b172490a28fb3e4c0f117e84 100644 (file)
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s)
                        fprintf(stderr, "Read error or end of file.\n");
                return -1;
        }
-       if (strlen(sym) > KSYM_NAME_LEN) {
-               fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
+       if (strlen(sym) >= KSYM_NAME_LEN) {
+               fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n"
                                "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
                        sym, strlen(sym), KSYM_NAME_LEN);
                return -1;
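The comparison changes from '>' to '>=' because strlen() excludes the terminating NUL: a name of exactly KSYM_NAME_LEN characters needs KSYM_NAME_LEN + 1 bytes of storage. A standalone illustration, assuming a buffer sized like KSYM_NAME_LEN:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 128	/* stand-in for KSYM_NAME_LEN */

static int name_fits(const char *sym)
{
	char buf[NAME_LEN];

	/* strlen(sym) == NAME_LEN leaves no room for the NUL */
	if (strlen(sym) >= sizeof(buf))
		return 0;
	strcpy(buf, sym);	/* safe: length <= NAME_LEN - 1 */
	return 1;
}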
index 479909b858c7f1a4fa0c5b36cc963170a2fb215b..8f533c81aa8ddefc7eca6ea6a29edc62a5a21a72 100644 (file)
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm)
        return key_task_permission(key_ref, current_cred(), perm);
 }
 
-/*
- * Authorisation record for request_key().
- */
-struct request_key_auth {
-       struct key              *target_key;
-       struct key              *dest_keyring;
-       const struct cred       *cred;
-       void                    *callout_info;
-       size_t                  callout_len;
-       pid_t                   pid;
-} __randomize_layout;
-
 extern struct key_type key_type_request_key_auth;
 extern struct key *request_key_auth_new(struct key *target,
+                                       const char *op,
                                        const void *callout_info,
                                        size_t callout_len,
                                        struct key *dest_keyring);
index 44a80d6741a1d66ee728e5e77880fe950a665663..696f1c092c508fbee1e1c0c7e713861eb4811ee5 100644 (file)
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
 
                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
-                       if (user->qnkeys + 1 >= maxkeys ||
-                           user->qnbytes + quotalen >= maxbytes ||
+                       if (user->qnkeys + 1 > maxkeys ||
+                           user->qnbytes + quotalen > maxbytes ||
                            user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
        key->gid = gid;
        key->perm = perm;
        key->restrict_link = restrict_link;
+       key->last_used_at = ktime_get_real_seconds();
 
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;
index e8093d025966c6dc97590a99babefd4f99cbb96a..7bbe03593e581116c0a9678a9e50788fdbac80ed 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/security.h>
 #include <linux/uio.h>
 #include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
 #include "internal.h"
 
 #define KEY_MAX_DESC_SIZE 4096
index eadebb92986ac3fa5e9e269b6bc1fc79dc64a438..f81372f53dd706ba966d89c1dffb9ab1fe1b63e7 100644 (file)
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
        BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
               (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
 
-       if (ctx->index_key.description)
-               ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
        /* Check to see if this top-level keyring is what we are looking for
         * and whether it is valid or not.
         */
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
        struct keyring_search_context ctx = {
                .index_key.type         = type,
                .index_key.description  = description,
+               .index_key.desc_len     = strlen(description),
                .cred                   = current_cred(),
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
index d2b802072693d33dfd167017ffdc0f19e006c37f..78ac305d715ed06163b2aa19dd3e685218bee691 100644 (file)
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
        int rc;
 
        struct keyring_search_context ctx = {
-               .index_key.type         = key->type,
-               .index_key.description  = key->description,
+               .index_key              = key->index_key,
                .cred                   = m->file->f_cred,
                .match_data.cmp         = lookup_user_key_possessed,
                .match_data.raw_data    = key,
index 02c77e928f68d123d88c433b4da03fbe85e24cee..0e0b9ccad2f882f8f62540da055740f88383e2d4 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/user_namespace.h>
 #include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
 #include "internal.h"
 
 /* Session keyring create vs join semaphore */
index 301f0e300dbd28d1557e875e68899e133315cfbd..7a0c6b666ff03a5e59e979a70fd14ed808b46861 100644 (file)
 #include <linux/keyctl.h>
 #include <linux/slab.h>
 #include "internal.h"
+#include <keys/request_key_auth-type.h>
 
 #define key_negative_timeout   60      /* default timeout on a negative key's existence */
 
 /**
  * complete_request_key - Complete the construction of a key.
- * @cons: The key construction record.
+ * @auth_key: The authorisation key.
 * @error: The success or failure of the construction.
  *
  * Complete the attempt to construct a key.  The key will be negated
  * if an error is indicated.  The authorisation key will be revoked
  * unconditionally.
  */
-void complete_request_key(struct key_construction *cons, int error)
+void complete_request_key(struct key *authkey, int error)
 {
-       kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);
+       struct request_key_auth *rka = get_request_key_auth(authkey);
+       struct key *key = rka->target_key;
+
+       kenter("%d{%d},%d", authkey->serial, key->serial, error);
 
        if (error < 0)
-               key_negate_and_link(cons->key, key_negative_timeout, NULL,
-                                   cons->authkey);
+               key_negate_and_link(key, key_negative_timeout, NULL, authkey);
        else
-               key_revoke(cons->authkey);
-
-       key_put(cons->key);
-       key_put(cons->authkey);
-       kfree(cons);
+               key_revoke(authkey);
 }
 EXPORT_SYMBOL(complete_request_key);
 
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
  * Request userspace finish the construction of a key
  * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
  */
-static int call_sbin_request_key(struct key_construction *cons,
-                                const char *op,
-                                void *aux)
+static int call_sbin_request_key(struct key *authkey, void *aux)
 {
        static char const request_key[] = "/sbin/request-key";
+       struct request_key_auth *rka = get_request_key_auth(authkey);
        const struct cred *cred = current_cred();
        key_serial_t prkey, sskey;
-       struct key *key = cons->key, *authkey = cons->authkey, *keyring,
-               *session;
+       struct key *key = rka->target_key, *keyring, *session;
        char *argv[9], *envp[3], uid_str[12], gid_str[12];
        char key_str[12], keyring_str[3][12];
        char desc[20];
        int ret, i;
 
-       kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
+       kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
 
        ret = install_user_keyrings();
        if (ret < 0)
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons,
        /* set up the argument list */
        i = 0;
        argv[i++] = (char *)request_key;
-       argv[i++] = (char *) op;
+       argv[i++] = (char *)rka->op;
        argv[i++] = key_str;
        argv[i++] = uid_str;
        argv[i++] = gid_str;
@@ -191,7 +188,7 @@ error_link:
        key_put(keyring);
 
 error_alloc:
-       complete_request_key(cons, ret);
+       complete_request_key(authkey, ret);
        kleave(" = %d", ret);
        return ret;
 }
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info,
                         size_t callout_len, void *aux,
                         struct key *dest_keyring)
 {
-       struct key_construction *cons;
        request_key_actor_t actor;
        struct key *authkey;
        int ret;
 
        kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
 
-       cons = kmalloc(sizeof(*cons), GFP_KERNEL);
-       if (!cons)
-               return -ENOMEM;
-
        /* allocate an authorisation key */
-       authkey = request_key_auth_new(key, callout_info, callout_len,
+       authkey = request_key_auth_new(key, "create", callout_info, callout_len,
                                       dest_keyring);
-       if (IS_ERR(authkey)) {
-               kfree(cons);
-               ret = PTR_ERR(authkey);
-               authkey = NULL;
-       } else {
-               cons->authkey = key_get(authkey);
-               cons->key = key_get(key);
+       if (IS_ERR(authkey))
+               return PTR_ERR(authkey);
 
-               /* make the call */
-               actor = call_sbin_request_key;
-               if (key->type->request_key)
-                       actor = key->type->request_key;
+       /* Make the call */
+       actor = call_sbin_request_key;
+       if (key->type->request_key)
+               actor = key->type->request_key;
 
-               ret = actor(cons, "create", aux);
+       ret = actor(authkey, aux);
 
-               /* check that the actor called complete_request_key() prior to
-                * returning an error */
-               WARN_ON(ret < 0 &&
-                       !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
-               key_put(authkey);
-       }
+       /* check that the actor called complete_request_key() prior to
+        * returning an error */
+       WARN_ON(ret < 0 &&
+               !test_bit(KEY_FLAG_REVOKED, &authkey->flags));
 
+       key_put(authkey);
        kleave(" = %d", ret);
        return ret;
 }
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
                        if (cred->request_key_auth) {
                                authkey = cred->request_key_auth;
                                down_read(&authkey->sem);
-                               rka = authkey->payload.data[0];
+                               rka = get_request_key_auth(authkey);
                                if (!test_bit(KEY_FLAG_REVOKED,
                                              &authkey->flags))
                                        dest_keyring =
@@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type,
        struct keyring_search_context ctx = {
                .index_key.type         = type,
                .index_key.description  = description,
+               .index_key.desc_len     = strlen(description),
                .cred                   = current_cred(),
                .match_data.cmp         = key_default_cmp,
                .match_data.raw_data    = description,
index 87ea2f54dedc3b2ceb72fad8a60511159d51ea3d..bda6201c6c4508d06380d87f283e73bab758a2ad 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include "internal.h"
-#include <keys/user-type.h>
+#include <keys/request_key_auth-type.h>
 
 static int request_key_auth_preparse(struct key_preparsed_payload *);
 static void request_key_auth_free_preparse(struct key_preparsed_payload *);
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key,
 static void request_key_auth_describe(const struct key *key,
                                      struct seq_file *m)
 {
-       struct request_key_auth *rka = key->payload.data[0];
+       struct request_key_auth *rka = get_request_key_auth(key);
 
        seq_puts(m, "key:");
        seq_puts(m, key->description);
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key,
 static long request_key_auth_read(const struct key *key,
                                  char __user *buffer, size_t buflen)
 {
-       struct request_key_auth *rka = key->payload.data[0];
+       struct request_key_auth *rka = get_request_key_auth(key);
        size_t datalen;
        long ret;
 
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key,
  */
 static void request_key_auth_revoke(struct key *key)
 {
-       struct request_key_auth *rka = key->payload.data[0];
+       struct request_key_auth *rka = get_request_key_auth(key);
 
        kenter("{%d}", key->serial);
 
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka)
  */
 static void request_key_auth_destroy(struct key *key)
 {
-       struct request_key_auth *rka = key->payload.data[0];
+       struct request_key_auth *rka = get_request_key_auth(key);
 
        kenter("{%d}", key->serial);
 
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key)
  * Create an authorisation token for /sbin/request-key or whoever to gain
  * access to the caller's security data.
  */
-struct key *request_key_auth_new(struct key *target, const void *callout_info,
-                                size_t callout_len, struct key *dest_keyring)
+struct key *request_key_auth_new(struct key *target, const char *op,
+                                const void *callout_info, size_t callout_len,
+                                struct key *dest_keyring)
 {
        struct request_key_auth *rka, *irka;
        const struct cred *cred = current->cred;
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
        if (!rka->callout_info)
                goto error_free_rka;
        rka->callout_len = callout_len;
+       strlcpy(rka->op, op, sizeof(rka->op));
 
        /* see if the calling process is already servicing the key request of
         * another process */
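
For reference, the record that gains the op field looks roughly like the abridged sketch below; the field order and the size of op[] are assumptions, though the strlcpy() above implies op is a fixed-size array, so operation names such as "create" must stay short:

    struct request_key_auth {
            struct key              *target_key;
            struct key              *dest_keyring;
            const struct cred       *cred;
            void                    *callout_info;
            size_t                  callout_len;
            char                    op[8];          /* size assumed */
    };
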
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
        struct key *authkey;
        key_ref_t authkey_ref;
 
-       sprintf(description, "%x", target_id);
+       ctx.index_key.desc_len = sprintf(description, "%x", target_id);
 
        authkey_ref = search_process_keyrings(&ctx);
 
index f8400101935661aeecd629bef6b28f36561b56b9..33028c098ef3c6314c39c4e62ccf223dde7c7eb3 100644 (file)
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                if (a->u.net->sk) {
                        struct sock *sk = a->u.net->sk;
                        struct unix_sock *u;
+                       struct unix_address *addr;
                        int len = 0;
                        char *p = NULL;
 
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
 #endif
                        case AF_UNIX:
                                u = unix_sk(sk);
+                               addr = smp_load_acquire(&u->addr);
+                               if (!addr)
+                                       break;
                                if (u->path.dentry) {
                                        audit_log_d_path(ab, " path=", &u->path);
                                        break;
                                }
-                               if (!u->addr)
-                                       break;
-                               len = u->addr->len-sizeof(short);
-                               p = &u->addr->name->sun_path[0];
+                               len = addr->len-sizeof(short);
+                               p = &addr->name->sun_path[0];
                                audit_log_format(ab, " path=");
                                if (*p)
                                        audit_log_untrustedstring(ab, p);
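
The smp_load_acquire() is assumed to pair with an smp_store_release() on the side that publishes the address (the bind path in net/unix), so the reads of addr->len and addr->name cannot observe a partially initialised structure. A sketch of the pairing:

    /* publisher: initialise everything, then publish the pointer */
    addr->len = len;
    smp_store_release(&u->addr, addr);

    /* consumer (the hunk above): ordered after the pointer load */
    addr = smp_load_acquire(&u->addr);
    if (addr)
            len = addr->len - sizeof(short);
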
index 6df758adff84bcbc15dcc4effc3cbb98f3ea0d9f..1ffa36e987b40d4dc65c5f12c23c8ca109f626b2 100644 (file)
@@ -1855,6 +1855,8 @@ enum {
        ALC887_FIXUP_BASS_CHMAP,
        ALC1220_FIXUP_GB_DUAL_CODECS,
        ALC1220_FIXUP_CLEVO_P950,
+       ALC1220_FIXUP_SYSTEM76_ORYP5,
+       ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2056,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
        snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
 }
 
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action);
+
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+                                    const struct hda_fixup *fix,
+                                    int action)
+{
+       alc1220_fixup_clevo_p950(codec, fix, action);
+       alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+}
+
 static const struct hda_fixup alc882_fixups[] = {
        [ALC882_FIXUP_ABIT_AW9D_MAX] = {
                .type = HDA_FIXUP_PINS,
@@ -2300,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc1220_fixup_clevo_p950,
        },
+       [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc1220_fixup_system76_oryp5,
+       },
+       [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2376,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+       SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -5632,6 +5660,7 @@ enum {
        ALC294_FIXUP_ASUS_SPK,
        ALC225_FIXUP_HEADSET_JACK,
        ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+       ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6587,6 +6616,17 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
+       [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       /* Disable PCBEEP-IN passthrough */
+                       { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+                       { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+       },
 };
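
An HDA_FIXUP_VERBS table is replayed against the codec when the fixup is applied; the index/value pair above is the usual two-step indexed-coefficient write. As a sketch, it is roughly equivalent to:

    /* select processing coefficient 0x36 on NID 0x20, then write
     * 0x57d7 to it, disabling the PC-beep-in passthrough */
    snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x36);
    snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0x57d7);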
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7272,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60130},
                {0x19, 0x03a11020},
                {0x21, 0x0321101f}),
-       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
                {0x12, 0x90a60130},
                {0x14, 0x90170110},
                {0x19, 0x04a11040},
index 37e001cf9cd1dc5f09c019b63e1719d061a36cd1..3fe34417ec897df5fd4d6d5f177ee01655e39776 100644 (file)
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
        conf_idx        = 0;
        node = of_get_child_by_name(top, PREFIX "dai-link");
        if (!node) {
-               node = dev->of_node;
+               node = of_node_get(top);
                loop = 0;
        }
 
index ce00fe2f6aae32d2eb777c2ac7533f3ecdd47a0a..d4bde4834ce5f1b581f4febe5fb0341ab609b321 100644 (file)
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
        unsigned int fmt)
 {
        struct i2s_dai *i2s = to_info(dai);
+       struct i2s_dai *other = get_other_dai(i2s);
        int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
        u32 mod, tmp = 0;
        unsigned long flags;
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
                 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
                 * clock configuration assigned in DT is not overwritten.
                 */
-               if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
+               if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL &&
+                   other->clk_data.clks == NULL)
                        i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
                                                        0, SND_SOC_CLOCK_IN);
                break;
@@ -699,6 +701,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
 {
        struct i2s_dai *i2s = to_info(dai);
+       struct i2s_dai *other = get_other_dai(i2s);
        u32 mod, mask = 0, val = 0;
        struct clk *rclksrc;
        unsigned long flags;
@@ -784,6 +787,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
        i2s->frmclk = params_rate(params);
 
        rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
+       if (!rclksrc || IS_ERR(rclksrc))
+               rclksrc = other->clk_table[CLK_I2S_RCLK_SRC];
+
        if (rclksrc && !IS_ERR(rclksrc))
                i2s->rclk_srcrate = clk_get_rate(rclksrc);
 
index fc79ec6927e385fbd85ab6182a6788daea610726..731b963b6995f6022c167fc0e921f9cf1bbf841e 100644 (file)
@@ -2487,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
        struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
 {
        struct soc_tplg tplg;
+       int ret;
 
        /* setup parsing context */
        memset(&tplg, 0, sizeof(tplg));
@@ -2500,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
        tplg.bytes_ext_ops = ops->bytes_ext_ops;
        tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
 
-       return soc_tplg_load(&tplg);
+       ret = soc_tplg_load(&tplg);
+       /* free the created components if the topology fails to load */
+       if (ret)
+               snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
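
Since a failed load now removes whatever topology objects were already created, callers only need to propagate the error. A sketch of a driver-side caller, where example_tplg_ops and the firmware handle are assumed to come from the driver's own probe path:

    ret = snd_soc_tplg_component_load(component, &example_tplg_ops, fw,
                                      SND_SOC_TPLG_INDEX_ALL);
    if (ret < 0)
            dev_err(component->dev, "topology load failed: %d\n", ret);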
 
index 147e34cfceb79bef10672643c0ba265ccf68293d..02d7c871862af26080e823a6936a54bbd182e454 100644 (file)
@@ -474,6 +474,16 @@ static void test_lpm_delete(void)
        assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
                errno == ENOENT);
 
+       key->prefixlen = 30; /* unused prefix so far */
+       inet_pton(AF_INET, "192.255.0.0", key->data);
+       assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+               errno == ENOENT);
+
+       key->prefixlen = 16; /* same prefix as the root node */
+       inet_pton(AF_INET, "192.255.0.0", key->data);
+       assert(bpf_map_delete_elem(map_fd, key) == -1 &&
+               errno == ENOENT);
+
        /* assert initial lookup */
        key->prefixlen = 32;
        inet_pton(AF_INET, "192.168.0.1", key->data);
index 802b4af187297a7c8f2ddf1e086877b35cd41687..1080ff55a788f720f240271741fbc38680061b7a 100755 (executable)
@@ -388,6 +388,7 @@ fib_carrier_unicast_test()
 
        set -e
        $IP link set dev dummy0 carrier off
+       sleep 1
        set +e
 
        echo "    Carrier down"
index e2c94e47707cb3e3af50b539db2cc66d9fc88890..912b2dc50be3d8ee88f51a41e8c440524ecddc4a 100755 (executable)
 #      and check that configured MTU is used on link creation and changes, and
 #      that MTU is properly calculated instead when MTU is not configured from
 #      userspace
+#
+# - cleanup_ipv4_exception
+#      Similar to pmtu_ipv4_vxlan4_exception, but explicitly generate PMTU
+#      exceptions on multiple CPUs and check that the veth device tear-down
+#      happens in a timely manner
+#
+# - cleanup_ipv6_exception
+#      Same as above, but use IPv6 transport from A to B
+
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -135,7 +144,9 @@ tests="
        pmtu_vti6_default_mtu           vti6: default MTU assignment
        pmtu_vti4_link_add_mtu          vti4: MTU setting on link creation
        pmtu_vti6_link_add_mtu          vti6: MTU setting on link creation
-       pmtu_vti6_link_change_mtu       vti6: MTU changes on link changes"
+       pmtu_vti6_link_change_mtu       vti6: MTU changes on link changes
+       cleanup_ipv4_exception          ipv4: cleanup of cached exceptions
+       cleanup_ipv6_exception          ipv6: cleanup of cached exceptions"
 
 NS_A="ns-$(mktemp -u XXXXXX)"
 NS_B="ns-$(mktemp -u XXXXXX)"
@@ -263,8 +274,6 @@ setup_fou_or_gue() {
 
        ${ns_a} ip link set ${encap}_a up
        ${ns_b} ip link set ${encap}_b up
-
-       sleep 1
 }
 
 setup_fou44() {
@@ -302,6 +311,10 @@ setup_gue66() {
 setup_namespaces() {
        for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
                ip netns add ${n} || return 1
+
+               # Disable DAD, so that we don't have to wait to use the
+               # configured IPv6 addresses
+               ip netns exec ${n} sysctl -q net/ipv6/conf/default/accept_dad=0
        done
 }
 
@@ -337,8 +350,6 @@ setup_vti() {
 
        ${ns_a} ip link set vti${proto}_a up
        ${ns_b} ip link set vti${proto}_b up
-
-       sleep 1
 }
 
 setup_vti4() {
@@ -375,8 +386,6 @@ setup_vxlan_or_geneve() {
 
        ${ns_a} ip link set ${type}_a up
        ${ns_b} ip link set ${type}_b up
-
-       sleep 1
 }
 
 setup_geneve4() {
@@ -588,8 +597,8 @@ test_pmtu_ipvX() {
        mtu "${ns_b}"  veth_B-R2 1500
 
        # Create route exceptions
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst1} > /dev/null
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1800 ${dst2} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} > /dev/null
 
        # Check that exceptions have been created with the correct PMTU
        pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
@@ -621,7 +630,7 @@ test_pmtu_ipvX() {
        # Decrease remote MTU on path via R2, get new exception
        mtu "${ns_r2}" veth_R2-B 400
        mtu "${ns_b}"  veth_B-R2 400
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
        pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
        check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
 
@@ -638,7 +647,7 @@ test_pmtu_ipvX() {
        check_pmtu_value "1500" "${pmtu_2}" "increasing local MTU" || return 1
 
        # Get new exception
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s 1400 ${dst2} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1400 ${dst2} > /dev/null
        pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
        check_pmtu_value "lock 552" "${pmtu_2}" "exceeding MTU, with MTU < min_pmtu" || return 1
 }
@@ -687,7 +696,7 @@ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
 
        mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
        mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -767,7 +776,7 @@ test_pmtu_ipvX_over_fouY_or_gueY() {
 
        mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
        mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
-       ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+       ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
@@ -825,13 +834,13 @@ test_pmtu_vti4_exception() {
 
        # Send DF packet without exceeding link layer MTU, check that no
        # exception is created
-       ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+       ${ns_a} ping -q -M want -i 0.1 -w 1 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
        check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
 
        # Now exceed link layer MTU by one byte, check that exception is created
        # with the right PMTU value
-       ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+       ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
        check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
 }
@@ -847,7 +856,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
+       ${ns_a} ${ping6} -q -i 0.1 -w 1 -s 60000 ${tunnel6_b_addr} > /dev/null
 
        # Check that exception was created
        pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
@@ -1008,6 +1017,61 @@ test_pmtu_vti6_link_change_mtu() {
        return ${fail}
 }
 
+check_command() {
+       cmd=${1}
+
+       if ! which ${cmd} > /dev/null 2>&1; then
+               err "  missing required command: '${cmd}'"
+               return 1
+       fi
+       return 0
+}
+
+test_cleanup_vxlanX_exception() {
+       outer="${1}"
+       encap="vxlan"
+       ll_mtu=4000
+
+       check_command taskset || return 2
+       cpu_list=$(grep -m 2 processor /proc/cpuinfo | cut -d ' ' -f 2)
+
+       setup namespaces routing ${encap}${outer} || return 2
+       trace "${ns_a}" ${encap}_a   "${ns_b}"  ${encap}_b \
+             "${ns_a}" veth_A-R1    "${ns_r1}" veth_R1-A \
+             "${ns_b}" veth_B-R1    "${ns_r1}" veth_R1-B
+
+       # Create route exception by exceeding link layer MTU
+       mtu "${ns_a}"  veth_A-R1 $((${ll_mtu} + 1000))
+       mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+       mtu "${ns_b}"  veth_B-R1 ${ll_mtu}
+       mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+       mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+       mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+
+       # Fill the exception cache for multiple CPUs (2); inner IPv4 can
+       # be used for this regardless of the outer protocol
+       for cpu in ${cpu_list}; do
+               taskset --cpu-list ${cpu} ${ns_a} ping -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${tunnel4_b_addr} > /dev/null
+       done
+
+       ${ns_a} ip link del dev veth_A-R1 &
+       iplink_pid=$!
+       sleep 1
+       if [ "$(cat /proc/${iplink_pid}/cmdline 2>/dev/null | tr -d '\0')" = "iplinkdeldevveth_A-R1" ]; then
+               err "  can't delete veth device in a timely manner, PMTU dst likely leaked"
+               return 1
+       fi
+}
+
+test_cleanup_ipv6_exception() {
+       test_cleanup_vxlanX_exception 6
+}
+
+test_cleanup_ipv4_exception() {
+       test_cleanup_vxlanX_exception 4
+}
+
 usage() {
        echo
        echo "$0 [OPTIONS] [TEST]..."
index aeac53a99aebba9d5e9033e68744f880b5f8b04c..ac2a30be9b325a4b68a1c1ce74d1fdfabb5b64be 100755 (executable)
@@ -37,7 +37,7 @@ run_one() {
 
        cfg_veth
 
-       ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+       ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} && \
                echo "ok" || \
                echo "failed" &
 
@@ -81,7 +81,7 @@ run_one_nat() {
        # will land on the 'plain' one
        ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
        pid=$!
-       ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+       ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} && \
                echo "ok" || \
                echo "failed"&
 
@@ -99,8 +99,8 @@ run_one_2sock() {
 
        cfg_veth
 
-       ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
-       ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+       ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+       ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} && \
                echo "ok" || \
                echo "failed" &
 
index 0c960f673324072430a5258da15b3eefa03b1d46..db3d4a8b5a4c4f61bd1176dc1cbaa8d3fc7168b0 100644 (file)
@@ -45,6 +45,8 @@ static int  cfg_alen          = sizeof(struct sockaddr_in6);
 static int  cfg_expected_pkt_nr;
 static int  cfg_expected_pkt_len;
 static int  cfg_expected_gso_size;
+static int  cfg_connect_timeout_ms;
+static int  cfg_rcv_timeout_ms;
 static struct sockaddr_storage cfg_bind_addr;
 
 static bool interrupted;
@@ -87,7 +89,7 @@ static unsigned long gettimeofday_ms(void)
        return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
 }
 
-static void do_poll(int fd)
+static void do_poll(int fd, int timeout_ms)
 {
        struct pollfd pfd;
        int ret;
@@ -102,8 +104,16 @@ static void do_poll(int fd)
                        break;
                if (ret == -1)
                        error(1, errno, "poll");
-               if (ret == 0)
-                       continue;
+               if (ret == 0) {
+                       if (!timeout_ms)
+                               continue;
+
+                       timeout_ms -= 10;
+                       if (timeout_ms <= 0) {
+                               interrupted = true;
+                               break;
+                       }
+               }
                if (pfd.revents != POLLIN)
                        error(1, errno, "poll: 0x%x expected 0x%x\n",
                                        pfd.revents, POLLIN);
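
The budget is decremented in 10 ms steps to match the short poll() interval the loop already uses, and a budget of 0 preserves the old wait-forever behaviour. A self-contained sketch of the same countdown pattern:

    #include <poll.h>

    /* wait for fd to become readable; timeout_ms == 0 means forever */
    static int wait_readable(int fd, int timeout_ms)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            for (;;) {
                    int ret = poll(&pfd, 1, 10);

                    if (ret > 0)
                            return 1;       /* readable */
                    if (ret < 0)
                            return -1;      /* poll error */
                    if (timeout_ms && (timeout_ms -= 10) <= 0)
                            return 0;       /* budget spent */
            }
    }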
@@ -134,7 +144,7 @@ static int do_socket(bool do_tcp)
                if (listen(accept_fd, 1))
                        error(1, errno, "listen");
 
-               do_poll(accept_fd);
+               do_poll(accept_fd, cfg_connect_timeout_ms);
                if (interrupted)
                        exit(0);
 
@@ -273,7 +283,9 @@ static void do_flush_udp(int fd)
 
 static void usage(const char *filepath)
 {
-       error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
+       error(1, 0, "Usage: %s [-C connect_timeout] [-Grtv] [-b addr] [-p port]"
+             " [-l pktlen] [-n packetnr] [-R rcv_timeout] [-S gsosize]",
+             filepath);
 }
 
 static void parse_opts(int argc, char **argv)
@@ -282,7 +294,7 @@ static void parse_opts(int argc, char **argv)
 
        /* bind to any by default */
        setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
-       while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
+       while ((c = getopt(argc, argv, "4b:C:Gl:n:p:rR:S:tv")) != -1) {
                switch (c) {
                case '4':
                        cfg_family = PF_INET;
@@ -292,6 +304,9 @@ static void parse_opts(int argc, char **argv)
                case 'b':
                        setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
                        break;
+               case 'C':
+                       cfg_connect_timeout_ms = strtoul(optarg, NULL, 0);
+                       break;
                case 'G':
                        cfg_gro_segment = true;
                        break;
@@ -307,6 +322,9 @@ static void parse_opts(int argc, char **argv)
                case 'r':
                        cfg_read_all = true;
                        break;
+               case 'R':
+                       cfg_rcv_timeout_ms = strtoul(optarg, NULL, 0);
+                       break;
                case 'S':
                        cfg_expected_gso_size = strtol(optarg, NULL, 0);
                        break;
@@ -329,8 +347,9 @@ static void parse_opts(int argc, char **argv)
 
 static void do_recv(void)
 {
+       int timeout_ms = cfg_tcp ? cfg_rcv_timeout_ms : cfg_connect_timeout_ms;
        unsigned long tnow, treport;
-       int fd, loop = 0;
+       int fd;
 
        fd = do_socket(cfg_tcp);
 
@@ -342,12 +361,7 @@ static void do_recv(void)
 
        treport = gettimeofday_ms() + 1000;
        do {
-               /* force termination after the second poll(); this cope both
-                * with sender slower than receiver and missing packet errors
-                */
-               if (cfg_expected_pkt_nr && loop++)
-                       interrupted = true;
-               do_poll(fd);
+               do_poll(fd, timeout_ms);
 
                if (cfg_tcp)
                        do_flush_tcp(fd);
@@ -365,6 +379,8 @@ static void do_recv(void)
                        treport = tnow + 1000;
                }
 
+               timeout_ms = cfg_rcv_timeout_ms;
+
        } while (!interrupted);
 
        if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
index 585845203db89e5d2bc90722fd75bfcf595671d9..076bc38963bf68d36733e4674a1f433a974de047 100644 (file)
@@ -4044,7 +4044,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (kvm->debugfs_dentry) {
+       if (!IS_ERR_OR_NULL(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL);
 
                if (p) {
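
The dentry can hold an error pointer rather than NULL when the VM's debugfs directory was never successfully created, so a plain truthiness test is not enough. Per <linux/err.h>, the guard is equivalent to:

    /* usable only if the dentry exists and is not an error cookie */
    if (kvm->debugfs_dentry != NULL && !IS_ERR(kvm->debugfs_dentry))
            /* ... safe to build a path from the dentry ... */;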